[rust-3p] Update to tokio 1.19.2

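Roll the vendored tokio crate from 1.17.0 to 1.19.2. Cargo.toml and
Cargo.lock pick up the new version and crates.io checksum, BUILD.gn is
regenerated so that the unversioned :tokio group and every dependent
target point at the new tokio-v1_19_2 rule (with updated -Cmetadata and
-Cextra-filename hashes), and the vendored crate's .cargo-checksum.json
is replaced to match the new sources. As before, the crate is built
with only the "default" and "sync" features, per the rustflags in the
generated rule.

In-tree users are unaffected by the rename because they depend on the
unversioned group, which forwards to whichever versioned rule is
current. A minimal sketch of that pattern follows; the consumer target
is hypothetical and not part of this change:

  # Hypothetical consumer BUILD.gn. Only the generated group("tokio")
  # in //third_party/rust_crates names the concrete versioned target,
  # so version rolls like this one touch no consumer build files.
  rustc_binary("example") {
    edition = "2018"
    sources = [ "src/main.rs" ]
    deps = [ "//third_party/rust_crates:tokio" ]
  }
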
Change-Id: If24fa3327cf5257696f0b613b2fef7aa41603ada
Reviewed-on: https://fuchsia-review.googlesource.com/c/fuchsia/+/693864
Reviewed-by: David Koloski <dkoloski@google.com>
Fuchsia-Auto-Submit: David Koloski <dkoloski@google.com>
Commit-Queue: David Koloski <dkoloski@google.com>
diff --git a/third_party/rust_crates/BUILD.gn b/third_party/rust_crates/BUILD.gn
index 3ecc309..efc55c8 100644
--- a/third_party/rust_crates/BUILD.gn
+++ b/third_party/rust_crates/BUILD.gn
@@ -908,7 +908,7 @@
 }
 
 group("tokio") {
-  public_deps = [ ":tokio-v1_17_0" ]
+  public_deps = [ ":tokio-v1_19_2" ]
 }
 
 if (current_os == "fuchsia") {
@@ -5022,7 +5022,7 @@
   deps += [ ":httpdate-v1_0_2" ]
   deps += [ ":itoa-v0_4_3" ]
   deps += [ ":pin-project-lite-v0_2_4" ]
-  deps += [ ":tokio-v1_17_0" ]
+  deps += [ ":tokio-v1_19_2" ]
   deps += [ ":tower-service-v0_3_0" ]
   deps += [ ":tracing-v0_1_29" ]
   deps += [ ":want-v0_3_0" ]
@@ -5055,7 +5055,7 @@
   deps += [ ":log-v0_4_11" ]
   deps += [ ":rustls-v0_19_1" ]
   deps += [ ":rustls-native-certs-v0_5_0" ]
-  deps += [ ":tokio-v1_17_0" ]
+  deps += [ ":tokio-v1_19_2" ]
   deps += [ ":tokio-rustls-v0_22_0" ]
   deps += [ ":webpki-v0_21_0" ]
 
@@ -10596,10 +10596,10 @@
   visibility = [ ":*" ]
 }
 
-rust_library("tokio-v1_17_0") {
+rust_library("tokio-v1_19_2") {
   crate_name = "tokio"
   crate_root = "//third_party/rust_crates/vendor/tokio/src/lib.rs"
-  output_name = "tokio-6f23cbd082f0fe53"
+  output_name = "tokio-93e9171240b81c17"
   configs -= [ "//build/config/rust:2018_idioms" ]
 
   deps = []
@@ -10610,8 +10610,8 @@
   rustflags = [
     "--cap-lints=allow",
     "--edition=2018",
-    "-Cmetadata=6f23cbd082f0fe53",
-    "-Cextra-filename=-6f23cbd082f0fe53",
+    "-Cmetadata=93e9171240b81c17",
+    "-Cextra-filename=-93e9171240b81c17",
     "--cfg=feature=\"default\"",
     "--cfg=feature=\"sync\"",
   ]
@@ -10627,7 +10627,7 @@
 
   deps = []
   deps += [ ":rustls-v0_19_1" ]
-  deps += [ ":tokio-v1_17_0" ]
+  deps += [ ":tokio-v1_19_2" ]
   deps += [ ":webpki-v0_21_0" ]
 
   rustenv = []
diff --git a/third_party/rust_crates/Cargo.lock b/third_party/rust_crates/Cargo.lock
index a9b4f4f..148db69 100644
--- a/third_party/rust_crates/Cargo.lock
+++ b/third_party/rust_crates/Cargo.lock
@@ -4123,9 +4123,9 @@
 
 [[package]]
 name = "tokio"
-version = "1.17.0"
+version = "1.19.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee"
+checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439"
 dependencies = [
  "pin-project-lite",
 ]
diff --git a/third_party/rust_crates/Cargo.toml b/third_party/rust_crates/Cargo.toml
index fe5a1dc..d60a8b5 100644
--- a/third_party/rust_crates/Cargo.toml
+++ b/third_party/rust_crates/Cargo.toml
@@ -196,7 +196,7 @@
 test-case = "1.1.0"
 textwrap = "0.11.0"
 thiserror = "1.0.23"
-tokio = { version = "1.17.0", default-features = false }
+tokio = { version = "1.19.2", default-features = false }
 toml = "0.5"
 toml_edit = "0.2.1"
 tracing = { version = "0.1.29", features = ["log"] }
diff --git a/third_party/rust_crates/vendor/tokio/.cargo-checksum.json b/third_party/rust_crates/vendor/tokio/.cargo-checksum.json
index f15d517..8ed0e84 100644
--- a/third_party/rust_crates/vendor/tokio/.cargo-checksum.json
+++ b/third_party/rust_crates/vendor/tokio/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"b6a95c0150a61774987525cfc386fea67df149d4e2fbc53dc50acd1d45192e19","Cargo.toml":"70624bf691cd4ffb004cc2622e01250c3d42b9e043ed68248151ce3848556ce1","LICENSE":"697fc7385b1b0593f77d00db6e3ae8c146c2ccef505c4f09327bbedf952bfe35","README.md":"390b17bca7f2aac8e0ae1d76893708dd79c93f367d377ab28744d38587a7e274","docs/reactor-refactor.md":"f7dcc42be8ae33446be399add73f234fc217a5d67478cfcac65601058ab50d47","src/blocking.rs":"8e62b2cdc512fedbca4b4c4f983629af035afea4ee7e918bb1a3e9851c8e034e","src/coop.rs":"ef7e148a1e2023657f9e4e2c927760a28b3e7d0684e8e5ccaebcebe90d7033e6","src/doc/mod.rs":"313fe6b3eb0e0e7a9d9cb070cc18e8749f5dd44724b651a90af46acf02a4f7af","src/doc/os.rs":"4af4a78a0af962f94e5e8f67858350e4fedb3a5f990db277a0aa8bf4f7b923b1","src/doc/winapi.rs":"f8b16da97e4822116be6a59ef0d9549a857861cecd584353a7fb02df173dde9c","src/fs/canonicalize.rs":"93c64b72abdca17877d6ab61d50a43765d6aef9e0a9f7aaf41b6b0b7d9a8a380","src/fs/copy.rs":"262180fadc66e5ac2bf1e8389628fdd039f14788e66a4f8b10e10633e7310f20","src/fs/create_dir.rs":"233cbab2579a787614aeee88845a57f1578e9c2e064a3456c799f61430e911ad","src/fs/create_dir_all.rs":"56081d541caadca0fc59e84d55e78e702fe9373679598016224ad0b072b189a7","src/fs/dir_builder.rs":"b5b21229c7bf15e2074afab1accfbc392f3c69e546c7b652cfdc8e90d5a4a301","src/fs/file.rs":"193078a2addd11de302024959c79984f13933e5e420098641fc71327bdd90d78","src/fs/file/tests.rs":"b00dfdb1cf5347e773d268447b0cb1ae4aafd036abb7703ba0d6bf404e155cf9","src/fs/hard_link.rs":"98cccbbb3719baee11c232e79723ab1cb3d6c8056bddb109c4990fe2c236c1fb","src/fs/metadata.rs":"782a1a5dbc2cd6c40e928579fbfcf39e5f1de28def78781590c0280acdf02960","src/fs/mocks.rs":"d380cd15d2644ed240f66a2acc7168b5a51f1357a65eb764cfadc98c42676013","src/fs/mod.rs":"eced6fd2a09a6e458556caafbd444753a3577a5dbbe11688f31c0e1e122879c2","src/fs/open_options.rs":"e1f78d96005082e6886de6fc8e6545cdb72e4e4fd444549b53c5bd7addd8510a","src/fs/open_options/mock_open_options.rs":"f08d384602bbd165eed4cf8811a19512d62c4d9573457f8d22c78c77b1091629","src/fs/read.rs":"055ae8b6ae96ebae2d05f8780e7592bb587a742506c6df8ee8b380fc7d2820ef","src/fs/read_dir.rs":"76a90e7a465da2e977392b65265dfa766c19d8b25967a278a5ac20f6a0543243","src/fs/read_link.rs":"93c104a21253372fef7056ab82e6065b0a1a8fc9be8b7329dfd5a8dd07c618b0","src/fs/read_to_string.rs":"9e5b2d476a6084e32a92c5421a8abc9d4f335f4ec677beec4bf8bfa109d7d106","src/fs/remove_dir.rs":"96475771e9c52678768288d8df6814216e0801bebc848481597ad34e829a5854","src/fs/remove_dir_all.rs":"b85abd05c7ab64ee8dc6cf5663a11e713aa51b357759ef660ef3cae3365ccc42","src/fs/remove_file.rs":"1cdf8bf16b3a164c594dac8773b7d1f9ebb28de169343184d34d6aac3b3a7eaa","src/fs/rename.rs":"a97875e92626fa46e23fece7b8698c9c4cea2bae8f1be8726f30ae6fe80ae0c7","src/fs/set_permissions.rs":"8adccafa475dcfc1bc3989af73374d90683c1be4953ef812e5fd606f968d7b7a","src/fs/symlink.rs":"32cf3e906531d30ebe6d8be7ee3bfe049949759b566015b56d0851f51abcff50","src/fs/symlink_dir.rs":"5fbd05365555ba7942ffc1c2dfddf201ddad2cf9b005be2ea99849a473fe982b","src/fs/symlink_file.rs":"a1170fd40a000dc449de972267f579a0d14f50dbb39466f985f183fdcd1d3438","src/fs/symlink_metadata.rs":"f5ce1e05f137da995e3e0d9582bae0a5f7ef4251285c64e912b0eedbb068b395","src/fs/write.rs":"1ffb734d31748bd879ad398b0fe99bdec569782b42677022957db2cae95c4d2d","src/future/block_on.rs":"ef7fd744d2727f169b6d8997e2202af88a256bf41a894c93a4490ade65fa0e57","src/future/maybe_done.rs":"9042619f2a907e81763ac6082c080faa28af5d571dd49e82b60f3f14d58598f3","src/future/mod.rs":"6f28857347c50cd6467f1ca7d1b1028b633de7d142c3f3ca710a8693d1e839f9","src/
future/poll_fn.rs":"e1a8393f18d6cfa9d7d0c8912b10b5b65cc9a2b507d8d1ac1c5c3be4e2ba057a","src/future/trace.rs":"c42712a8d372922eba7e05cd21382fe5df5eec02cbcc870062100b59ab99654f","src/future/try_join.rs":"0ea5a069b17a34bbc091acbd74b9d51794a55a85dfa63fe2404d5ee91a4f0038","src/io/async_buf_read.rs":"b37caa8f6c974b3c97327c635218803e573c531d4197950840549aa794357c99","src/io/async_fd.rs":"b3eb76f964bed4ec0b3d20059a038e178c13e462f25860b824d7b754109ede2f","src/io/async_read.rs":"f52c8d2f4a283c0dc8d06dc974484749973125b0b691bc0c3d100972ac67cb92","src/io/async_seek.rs":"a9a0df389ff2be3d79208ec475fcfede46a86f6ea0b822b1a4ce8273ec714b0b","src/io/async_write.rs":"198ed6a475b6f093fd2ff15e618d816d7e33cb8bc28d2e2299533e5df0bd78d6","src/io/blocking.rs":"d6613950dc10f17287ff396d6acc7f386bed1ffaaeb71dcbb10ad4ffcfb29ed7","src/io/bsd/poll_aio.rs":"893aab3d616e33fbb6ea61e6d590e80d2871e676c0647a58041b16e4698ca34e","src/io/driver/interest.rs":"726c813211a8606f68e58a2693cccdf3788f252aa1e9700af088ba756ba36c1d","src/io/driver/mod.rs":"66325853609db8e4cf51de3f4887a1de0a5c42879128aac66d1aa2cf39da6577","src/io/driver/platform.rs":"023acd3f2703d241b3e91ab6e4d4c0bc5ccc3451655fffd9f37938224f915494","src/io/driver/ready.rs":"e4bac7c50bf7c2d29b3516e9184c2a46cc4631366ae940c41cc5320bb35a9250","src/io/driver/registration.rs":"b3a62b64014181839c8d13aa5d13da27d8d173c18feb18c3f03246b08404c0d2","src/io/driver/scheduled_io.rs":"d8da8185d1c72af922746737769a612457f053eb529a465d242de641fde1c731","src/io/mod.rs":"1d32445c95577065827eb98ef842bf4c9be842da376ef95c565c0a65820c55f6","src/io/poll_evented.rs":"e3b9765a8630b497be71acfbee58fb00174399e4755e04859d418ed82350094b","src/io/read_buf.rs":"8a79bdd31b7d77dd585db87dde45f4208f7f131a6614b51f5a776d512b435c2c","src/io/seek.rs":"e9e346fc926c3360601b80a8319a25fd0567dd6f77fab666694e9787deaef633","src/io/split.rs":"2c6982598a86620b76826ba6065953e9fc78a0434652fe97d82d79f7bbcb66b3","src/io/stderr.rs":"8f4d9fc5e596180171b68919bde951c49d134446d30af06dbbbe413ff2882ff5","src/io/stdin.rs":"5c15de00a7fe2ffee2f48f2ecb2a4fa765f3de891f8cdbc240e2988db7cc0d13","src/io/stdio_common.rs":"6e54019e53594a632dd3536e575ca11ee7b279977ee92c556ef39c46255f6b1f","src/io/stdout.rs":"2c1b11ae547b0eec06e2b21039655a2acf34125b789548c7bd362babf5b05cd2","src/io/util/async_buf_read_ext.rs":"7f7fde910ecd9e526bb85882a318f457dedf6ccc2cdbc693112562186dfb78de","src/io/util/async_read_ext.rs":"f70825f0be7ebecce5473c53c716cc57f1123e5818f7b917444c3892639a5e2c","src/io/util/async_seek_ext.rs":"804ea19c98a28aacedc38852f692f59c52f1327807a0620c1740182ac6b01e02","src/io/util/async_write_ext.rs":"6256ed8320aa225fbe9ba159cdc20b08e1e22bcab4c090fdb1dca1cdbc2d97b4","src/io/util/buf_reader.rs":"670a58f404e5689daf1f2b3070b0b9e95fef96ad19f0e8487f294e8a2afe558d","src/io/util/buf_stream.rs":"2246fe71b707c15d7168c5da5ee158cec6e854d4fd11b685531c16a9c3cf2c6a","src/io/util/buf_writer.rs":"f9c3e018c9f9177fb6d910096503caee727bebd3c36f5f67dca2c4c55044408a","src/io/util/chain.rs":"5cd8df2cc7bbcd18ca2336a78507fa8009c0a9e595f81730a8c16cadb8f731a2","src/io/util/copy.rs":"34f164f2169caa58fc9c43415108e61c662aece2e2f3010470d89b3fc3e2aedd","src/io/util/copy_bidirectional.rs":"0f72d957fa2d1b3517224e34c65f06016f5e04857d45b01c368398fb95e555f5","src/io/util/copy_buf.rs":"b029d0ee8da5301a06722d425828418027234a8b111a209fa49ead161263aa8e","src/io/util/empty.rs":"48f23da224ff6c5d3803b24e4d5e6a18c75b31344a4570081bd0b3c3e0244fa6","src/io/util/fill_buf.rs":"223725d828071e923f25d2d49a0f6e470c411a6d9ba225700f2dd8d5793601bb","src/io/util/flush.rs":"fe3b4ff226e294843b8cbea9dc4e02d58158
2b78ddaafce137c96e290699c718","src/io/util/lines.rs":"1d9f9b99567111c911e72a4caa2abb19b277f2cdd0ca3268ac5ca6df5276259f","src/io/util/mem.rs":"222519afeb028305f7d7165e8ded58aceeabc5935b223ae73bb2413703565ddd","src/io/util/mod.rs":"6a9012d78fe2bed8240e7a628e9421cbef45433551522624065fdcbb329f3594","src/io/util/read.rs":"01c1113b6171c83ba2a0ad774703c3139b0ff11f47953f1b50861334d44f86ec","src/io/util/read_buf.rs":"a87be2d115c09a6782ec8cadeafc92fb1fbe534580e71540087c3298a03bfca2","src/io/util/read_exact.rs":"4a8650fd7a885963a0fef2bec24c17046c5624e4dd7fe229ab3f33c4a92fc66c","src/io/util/read_int.rs":"49da230796335df584832cd7deb8370b4d1e0350d743046389a9d9ae17dbd94f","src/io/util/read_line.rs":"9cdb2d778b81bc50098a6851981ed9f541bd0c7896c0763b811971b5a598b7e8","src/io/util/read_to_end.rs":"7aca8032ee911467c24ce435b7bcf023c72ca6f19c2663269ff50d31913d6e01","src/io/util/read_to_string.rs":"fafb5463b013cc8f76def3a505dbebd179afc95bde0e2ca9388e428265788924","src/io/util/read_until.rs":"b2a2a7c434439fd2c9269494e59edbe1f75eb45449dd6be384663a6ceaf137ac","src/io/util/repeat.rs":"d4effcd81338831eb373cf2db972a99218b8379b91066940a732edcf4524c7c2","src/io/util/shutdown.rs":"971454342b4636fbd68e123d59d87017d81f72afb410c385846069b11def8efe","src/io/util/sink.rs":"0dcb794e48ca9b1c28e5f9f2051073ea0951a54c9c7dfc903ce9e5489d3d8cd7","src/io/util/split.rs":"03a59adccda29608886e38a1f484fbd4d6a6019180c4cfa851372d250796aa5a","src/io/util/take.rs":"ef080ce27d23cc23b9f99fe14ddd56349a3cb1442aba18f8405c30d20017b074","src/io/util/vec_with_initialized.rs":"06f3a452c0158b4b72c6fdbddd6505eec8ce1893abce0420db1e79897d63380b","src/io/util/write.rs":"20d14ee545ab1f67732915522e97808d1ddde13d151505c1289b596be519f7c8","src/io/util/write_all.rs":"906ff3fb24c6a979b104598f9a8229421bcaf2a4218c28069504b34a218241f6","src/io/util/write_all_buf.rs":"5911bf673ef89097938f4e2e38d9012865b28a0ce5ebb217ebe0e2507de6c1e3","src/io/util/write_buf.rs":"ab51d6174de24cbb729ce77dbaeea27e16059b8253e4830d8243ec5f08a08a8c","src/io/util/write_int.rs":"f321e69b0c7c01728b079e9fdeedb96c26475667e8b259d0c5f4a83d060990d1","src/io/util/write_vectored.rs":"7a335a9f796daa048fa9708dc44d32d2567b36461a6d88f07893eb31f304b69d","src/lib.rs":"6a9ed401d3d763c6d13faf5966d6a8b9b5ec8866956e2838190c18b5dc2c831f","src/loom/mocked.rs":"6db5ed42cd469ac1f98d04c535e59aea174d6c06aed5f9f0c5b5254dc6e476b9","src/loom/mod.rs":"b14b9333a7a21bd125a4ae82f01e5ea9c9ed2f78d7d1ad49a13d9b176f1fe8ab","src/loom/std/atomic_ptr.rs":"16d7e6f841697020aa315a303f26cca714c35a96c4912ae45b90ad3ab0715e28","src/loom/std/atomic_u16.rs":"8793a4927367270305a6a3a68423ccc838cead0960ab1a7cb345681182916c14","src/loom/std/atomic_u32.rs":"39889c3295f5a201ecbd4ce3f5f942d88931fa9988071f771935dfd0c6c3443c","src/loom/std/atomic_u64.rs":"2f8314f2ef093a8701d1ddfcd88278da52448c0ea9f0505bfd33c05e3e26f8fe","src/loom/std/atomic_u8.rs":"98f6baee74059eea2fc950dabc273a2fcb3518950015fb3a5acb3dbc58ffac03","src/loom/std/atomic_usize.rs":"ce7e840ac029a91adb90d7834c2bec3a8ef5fcf0b311de0bb6900c0be199301f","src/loom/std/mod.rs":"0503d16f472852d2fdb1bf9edced988a264ec933e9ba678de65f9d72b5e88d6c","src/loom/std/mutex.rs":"83938246987904d2cf0fbd76566170e62ab89a2e10dc309f8aa4e149cdaba74e","src/loom/std/parking_lot.rs":"89c0b87687dcfe5b200a82f78ab9517b40c42f07da4b9ced59067b76a8c391f6","src/loom/std/unsafe_cell.rs":"05e2b4d8e78b0435650c6da6239ce8c53b4de2573e64ba098354b049f23429ec","src/macros/cfg.rs":"2b624d6d114560809f839b49491dc36e0108977dd2131a8f02bd20c07be8b654","src/macros/join.rs":"54fb8935246c4524e958a5bd80b9286f3c523c3a613adecfe204bfd4504f925a",
"src/macros/loom.rs":"80d2e4af9fc50d0bda1b20b95f8873b2f59c3c0e70f2e812a6207855df76204e","src/macros/mod.rs":"913c824be0242590352d952a2ec445ef1d3d77e1512cdea4e2be7f4956cb9bf5","src/macros/pin.rs":"294e5644061e41801dcee5494b7334439e09af0b6219ce164090feb624864631","src/macros/ready.rs":"6efd4c866c4718c3a9a7b5564b435e2d13e9c1ae91fd98b1313d5e7c182942d6","src/macros/scoped_tls.rs":"d598cf0c6e57d91d56432898ebab741aec3fa07b08e0ab5f91dd151127f4ae3e","src/macros/select.rs":"4832b4aacaff0f7c79c97f3c707e613086accadec6a3f15857c94832ee35aeb3","src/macros/support.rs":"0cf0789ba2dc5a196ccbabba9ca9f1861a71284032b98dc7c372078c12c5d5ce","src/macros/thread_local.rs":"8602495ed102b63e3048a261eda7483dc9a24b15a74d7059c31635e8f45de19a","src/macros/trace.rs":"33befd4533a3b2b4b22e246033f2bea8930174a7da58adaa57dbf20931275bcd","src/macros/try_join.rs":"fdeb6c3d4b56b81cd62f2532bf2373494fecd301e3c3a4afc66e4f1651bd7a9e","src/net/addr.rs":"dea03c41778eae06672d294bb05b41e31cee4efae7059bb9cd83a9c6f4664a7f","src/net/lookup_host.rs":"c7a21d735225e316253d822f6b11a17107e6a8db004f947a54d8bc12ec782baf","src/net/mod.rs":"b75d65aee9871a9b5ead894a01199d0af48f993e12a5e49041a91148739876bc","src/net/tcp/listener.rs":"c0f9d5e115da4127ce6792122e48d39dba1888928411241a3e4982ea68f38767","src/net/tcp/mod.rs":"d33297e086d7fcc6c46584782d7b24f65578bf9895844f1ec6cde95cc5b8902d","src/net/tcp/socket.rs":"df83f65dc8ec7de1619217697d43c13f3260c7b64f7b41c42ee850b0d5b4760a","src/net/tcp/split.rs":"e477c847baf20c5f0235a40720a60fd3564cab66bef37f48c2a17cdf95de96ad","src/net/tcp/split_owned.rs":"55f393899efa77a41571e64d96507a374c413050ee866144c8f57b3d2935b2d4","src/net/tcp/stream.rs":"3dec8d775f1cf050ea90359902f8c4aa444a9cc04660247d48762b33e7db1b04","src/net/udp.rs":"60943a9703084926639a4ac3bfbc62a5276a8cf9ebde479f11e9992f0f82ea93","src/net/unix/datagram/mod.rs":"fc48924e5d1e551405b0708a2d122473cdafeeee802a5722f64b4cf41a1c01da","src/net/unix/datagram/socket.rs":"f888d8c1499693e1b4fd92e17596a310c81679a0dcb5a3f528d151ec36158665","src/net/unix/listener.rs":"eb9f0deb2f6f292b3224f5fc4c6aa12fd4e842e867e75ededcf4859276dc1b8a","src/net/unix/mod.rs":"2c18ab3fef385be4b1603cdd391f557f36a3188b8bebc3949647b27bbd6d84b4","src/net/unix/socketaddr.rs":"66bf18321a81baeb394a7094567632b113e44e12d2643109b6a97c89d919bf3a","src/net/unix/split.rs":"9f6c51cc59c8b2c6bd8e7b8f918486ccd8d200973e240c58ce26490569db2182","src/net/unix/split_owned.rs":"e0640e4fd2b6be95139f7f1aa014c149fc2dc834e7264382b7ff84750b93e10b","src/net/unix/stream.rs":"3a9073f8c6ee7dabe170c19b06e09db0bb280c7e3f20aea8a6ee49325f60d7e1","src/net/unix/ucred.rs":"836fe68abe151abcaaf4d9bc92025a12030f1c2b7beb314cc453d0d88aa316d1","src/net/windows/mod.rs":"a1525f35c1acb92b15bec788a625b76acb42f9424f392074db697102ce79760d","src/net/windows/named_pipe.rs":"670aa7d368c84b92c29ceb3cd21776cf1e8dc310aa4f46600ab0f4cb9d5637a6","src/park/either.rs":"251c6255768445ca2bcfb037b1fcffa05e9ca0f4f31923ffcf341eb3cd794180","src/park/mod.rs":"0e8d343742e6e498c4f4d44a837d01da2e75012aada2117641937c56f85abc8f","src/park/thread.rs":"f5c4885e6c7e3f6e33e87774e79ef3bbef13fc7a4e7f9c5f27a86d74fec8f275","src/process/kill.rs":"2f98bd1bd28ab37bedc34ab7b737760407ab5315420538acbd18da31d2662d94","src/process/mod.rs":"dd4724fa9dae1c875d1ad8c7b6aa41f4a386851dfadbf0bcb11d29204ae13583","src/process/unix/driver.rs":"073baa2a7b1065745793a219cb7045907f339f71945943dcb3c52a2cfeb7ea45","src/process/unix/mod.rs":"ab58ab44dfd8ec1da5dbd064faad0ffdb10c2c70c56e741cb5726648dec12f4e","src/process/unix/orphan.rs":"9927c3c918bead0b56b1a9256b8c8eba9f9f16f87f0c81866278b65bb96a1
f72","src/process/unix/reap.rs":"62868319849b8482d8d927dcd00cc8a74b9af61fd47494b39bd41fe2f4dcf0b6","src/process/windows.rs":"feb605a16ea975886b38a66f30b3aad97f7e9ae73d35eb2ad45ccaae36d0e4e8","src/runtime/basic_scheduler.rs":"f45e55922b4eee2dff049496eef226ccca17ec1c308adaa394982fbd0c97e0c1","src/runtime/blocking/mod.rs":"26ce33f7e51e3afc1f18c2e2e110420f5d9956f9c8738cf171b38d4f853bb8e4","src/runtime/blocking/pool.rs":"47cd86a446428bf7fd1e44c5b7fb0410ce00cd9281002d26601bda644ec62393","src/runtime/blocking/schedule.rs":"fef31b442b9e743a4e0f3d490cc320ecab17b475f2be7c995a57fd2af7b34b88","src/runtime/blocking/shutdown.rs":"964c67d028152e4daa3d424d90297f4589973114a527dd081c90cbc6c80f6fec","src/runtime/blocking/task.rs":"60b68f1dc13c84a64376f078a0eed4cd9e9f160e9ac0a069b2a8b11eb748eb5b","src/runtime/builder.rs":"4380af8e914969e4c9a092c45110e856d95dd9b7e37c93723dc1866f5bc3b7be","src/runtime/context.rs":"fcaf27b7a953435fd0c3204f259d229a881c66e89eb0c0293d40750dc19a0571","src/runtime/driver.rs":"763324ba05d5901acd060532551cc25c539ee9c1c9a5a87b72450fe74f2df394","src/runtime/enter.rs":"76d42510a6f0bf159746268c316071d44daebf7523347c032917f55031f7880f","src/runtime/handle.rs":"8f0ddf7da5106950d77d3c6ca64fac88798f6e13cc9c84ce6f427360a3b0abe7","src/runtime/metrics/batch.rs":"fbe9c32fb1c5643b7cc3a2f563e2bd1582ea068a5c1215a151a50f1855c2719b","src/runtime/metrics/mock.rs":"0daeb89a321a7922ff28432122ece55b818ba60469d63531e9a3b29eae110647","src/runtime/metrics/mod.rs":"4a97af964e1b3b0cc1866336ee42e9aaff64f9b87e8bfad691af9ecae65c1d0f","src/runtime/metrics/runtime.rs":"628ca61eac20eccbe0eeaa3e1bdbfc855005e635971e5fd0fcb011b145a2ec69","src/runtime/metrics/scheduler.rs":"27ac9b6efc0d7bee874c270d5e6d46b8f2bc7a78e620b474a6b00034082842c0","src/runtime/metrics/worker.rs":"aeb298a0a517bcabe19d097b9b4500193e2b0bf974e4c6e90a51c3c39e3bf42a","src/runtime/mod.rs":"7cb4660e62db48b8af4657477d702ad82efee34114fe90ce14942c02be2daa4c","src/runtime/park.rs":"e1d66e859e402068e70580277b4b128b48bda145de77ddab90c55b8a19799083","src/runtime/queue.rs":"6f4a02d98f7784979511bc3a36995f3cd6b6bdd412c036f2b06c49d22a1cccae","src/runtime/spawner.rs":"05ee54be6a380640aa7d7a9107d81b9a80c5c479f65f70be99059c6f4062841d","src/runtime/task/core.rs":"801361712c9a930478dfd7d55f738d0c7374e0676366bf6dfa9891a3ca64737c","src/runtime/task/error.rs":"eb3ef53fbcd235b44bb25c759f0fd8fd46a0548e299d36fdf7785dd55d4e816a","src/runtime/task/harness.rs":"c0182d7f6e4cf8256367aa5ad5e77fb16f9314a987e8e4a0504751e013734e9a","src/runtime/task/inject.rs":"34cd85d0d9d241ad1bc34d710bac708ee2f3fbe43bb68732123a30ed99ca2e36","src/runtime/task/join.rs":"358c88e23e62f4577c5534a54732e2981c6f7daba05860550ad296a466c9e3f7","src/runtime/task/list.rs":"0c671142c97b3223fc060f8c4ad281c96d5e8edcd5142bc704e612dcb8820d97","src/runtime/task/mod.rs":"5d13730713d45db479d9f28da2bb59bd24d4e58798ff5cb5da26468304d5ed17","src/runtime/task/raw.rs":"78fa51cd21505a9df06ba1f35d8e3dbc88093e7b2e066cdda9b65fbd8827ed7e","src/runtime/task/state.rs":"6b872d5c489daa68091f9b17be8dee645b5c960a1eba8df8852f27646202d20a","src/runtime/task/waker.rs":"f06c05fa37d1974218a44922edade90e0fe533e06e2bdd111fb3219e2393cfe7","src/runtime/tests/loom_basic_scheduler.rs":"9ec324867d9d123bf93c8f1c9feba8f9b57689122a1efa784adff425962b2d65","src/runtime/tests/loom_blocking.rs":"4110de132b975fbfff2172457ed0e75ab84e9e643eb8099770f86fa173e12215","src/runtime/tests/loom_join_set.rs":"b4d8701e530376ea62249a6329af7fd0c25d6539fa8a530acb0b26acd0e5b54a","src/runtime/tests/loom_local.rs":"69cf7fa67da9e3efe3eee722791f811efb395dcf821780051e62842654de
09e3","src/runtime/tests/loom_oneshot.rs":"cb0cb66e1014708a10a2236c2dbbac9441b6a6e36a49c419fa3a51af62b054ce","src/runtime/tests/loom_pool.rs":"30ea07a942c39bf1eff70cc6d7ae10cc5f0162c0389029ef0bf8b14ebaf4533d","src/runtime/tests/loom_queue.rs":"1906ba6c36659a48236245baeedb109c58ce2331e36041c3abfcda5e0b285a80","src/runtime/tests/loom_shutdown_join.rs":"2f48626eb0a4135d480aa5bf72e514c4abf39aa17711c96d3829f93c03758557","src/runtime/tests/mod.rs":"0432bc4633080e7a85de00df15831f277c94f04a5829a761d0818a31ed47f01b","src/runtime/tests/queue.rs":"d1cae3668a51719c6facceef669846f5ba532e0006ef1bf29a187e1d91bc6b03","src/runtime/tests/task.rs":"52e9bdd03d931bd24020214ddd2390dcb9ce5aca36c039853f4257b42796ab1d","src/runtime/tests/task_combinations.rs":"9ec3bfdd62aa8beed6738b36870a383be9db452b67dd4a08a6992bb23fba8564","src/runtime/thread_pool/idle.rs":"b048a6ed70ddfe08670b0f99535566be7eacafd75df9c3a9447ad14c835ca326","src/runtime/thread_pool/mod.rs":"d4fd42bebe199b63969bd69318e7bdb3295c2c24f73d436c603355169c44a5bf","src/runtime/thread_pool/worker.rs":"2e8520b49caa661fd2439ff2db72cf41de861fb4bd87eeb62170850d19fd50e8","src/signal/ctrl_c.rs":"3e1d98df851b9ea1418f607c260434e906ada4e3d6c14cdfa5c81a0c4d55cdd3","src/signal/mod.rs":"e326c839b0b8d549aff9e4e9204b99c544ab49f32ab2c5066fe3c7b8bc7767eb","src/signal/registry.rs":"d4eb2577277a9909b4d6f79d53ca76774a1d0601c31609bf1929a829027fd4c1","src/signal/reusable_box.rs":"5793ac41840ac8cafce7db78cf6c1fa94943829d8ebc2653c7ff07de87aadb0d","src/signal/unix.rs":"c88b98373a9eec9f509e45ad94de9e725de3b0d1c1552d3c78ee0aa943ebb4e7","src/signal/unix/driver.rs":"78ead5545500535c20e061b0e5809fcec15369286083ce9208a0a1fad771aa37","src/signal/windows.rs":"18680b96548ce1269615587b803186eda94521f9af8519f5ef6aa04cd706c816","src/signal/windows/stub.rs":"e080a121465461e6de2b7756956975cc755820bb7c54248b903d7d18aa79e255","src/signal/windows/sys.rs":"8a7178e52c33689420e54bbfc2db78f1b15e3c7f16632594d6d668ca1d865fb3","src/sync/barrier.rs":"4dfc962897d02d7a71ce16e2cd134a72dcc10e5efb79abb66743a081a1cbac18","src/sync/batch_semaphore.rs":"628ad5e2b2dc4bd1166519e9beacd34319e1d9c18f04833c7c909804aad0b06b","src/sync/broadcast.rs":"123abe24e57759871d42b6dbdd90809ff47abd06908a140e14870cdc58c0e0b0","src/sync/mod.rs":"339338d51c28f55b70c2050d0dfb527d5111fa321c7b0be065bff41150cda215","src/sync/mpsc/block.rs":"c8ee862dabbecaa67b848abf62e97a5c445ab99e4fa7ec0a8c024e3c7bec0bf8","src/sync/mpsc/bounded.rs":"69e63335f55717a1e396f39893c74e5404f97a852ecceb5e76ca98613664bacb","src/sync/mpsc/chan.rs":"2c3564e8217fcbaf6d69e0217e71f8196d8617a158c7c8d9aa8195a1ae50d242","src/sync/mpsc/error.rs":"f445a7c3894799ca3be60f714bf4431766eb27e159209eeafd99e398ec6c7976","src/sync/mpsc/list.rs":"45b165d0965695248d63bcadc17e571bc81dc2e9d7448e86517d5a9a818401ed","src/sync/mpsc/mod.rs":"abe1eca4a868e485016406ec59b46a7251bdf97ac0dc6c0a8d2985937b65b8a3","src/sync/mpsc/unbounded.rs":"01d047a3e2fe57a25b2fb13e489765ccfca15cce110242565087afd913cc8afc","src/sync/mutex.rs":"3885f93e3e5cc7051ee62c66c94582097cebbf693494e232726bf65aabc127b1","src/sync/notify.rs":"654d4f804874fd220f3f5a28752220f031f5c9c15dc6c8bc11d3073641b9a3ad","src/sync/once_cell.rs":"935cdcb05ac86fe10731c6946f3c89b52851da61e42297fc6a98eb1418eb6a53","src/sync/oneshot.rs":"67e0504b7ba54768d7926ae7cfa4884350412344269b0deb44d282118a5fb540","src/sync/rwlock.rs":"eac7ae4ee97bb4962bcbd8f56a1256933be30db6db1394e97d30d866526dd3d5","src/sync/rwlock/owned_read_guard.rs":"6f1f39e8a84188cd3a57683c46da492257c4752d9204a158d04cab7acb26ff69","src/sync/rwlock/owned_write_guard.rs":"ffee768eb69cd
ddb35e51fc6b9547762e9131b46c236e44d87de86ddd7d65f8b","src/sync/rwlock/owned_write_guard_mapped.rs":"ae79b73500cbdbc3f8acd9a456bb292fa97cf0c3d509434895c906d84c0ce101","src/sync/rwlock/read_guard.rs":"edbeb6fd9fa848ff8e4749c5e3a4dfcdb8c82842ca8c91edb86c139af854ea79","src/sync/rwlock/write_guard.rs":"d2c7fbc1b32ee6de2f4aecf546034a620c4427fe244a9fb4e8e506e4ef9f5a43","src/sync/rwlock/write_guard_mapped.rs":"cb632f2653efadb3717d1cac0112891f676218431155753640f775705e33bd53","src/sync/semaphore.rs":"477b93fdd093a1e76954e97d3681074b28a1f1fe2a5dec751ad35a57960e4e15","src/sync/task/atomic_waker.rs":"41e9e05522254afbacec9895390d6f95498e413a61d8f654f9c421c808e7f83f","src/sync/task/mod.rs":"f5e38105c7f8a942c0e49b973bad0a8c2a1df81deea19f3c5228edc4896c1725","src/sync/tests/atomic_waker.rs":"388254c09ba995b46918980a61d440a1eb3f81db3c8abec3e1843301ac543547","src/sync/tests/loom_atomic_waker.rs":"984b52699c47383b9b62e6c4ff93fd458bbe64cb4ca836463adbaf94d27c38ee","src/sync/tests/loom_broadcast.rs":"b2c6f138707fc389ee7d91109bc38093af9060b3465e68d3543cb652e0070406","src/sync/tests/loom_list.rs":"f0ce15a0f965fe558a21bca24863c712156eaeb10feb8ef91031a6d6e3cc5dba","src/sync/tests/loom_mpsc.rs":"4883352b9d75a81c878609613545ae14910eca4f2e7f3718053dfdb792aa0760","src/sync/tests/loom_notify.rs":"c571a81799e617222f1d07e2b9b0ae9b21ead982aea90205757b6038859abd04","src/sync/tests/loom_oneshot.rs":"c3596c15692b16e7cb8cd6957362adb3a98b3d7f16c4a4262f19a3a27f262b03","src/sync/tests/loom_rwlock.rs":"80ec00bdcac838806d4d9f711cb154e02f22913ba68711855c39ca92028d3e4e","src/sync/tests/loom_semaphore_batch.rs":"c6f69b8d5b2e6842287ed34638a9045095d9f94c86ba6bb84c1224bbe10026ff","src/sync/tests/loom_watch.rs":"558ad9bab3f18b79a0d46548aff24288f1c6c5d0581e51559cc2a82cccd0696c","src/sync/tests/mod.rs":"1ef2026ac3dfbb70a437b8be441deca4b7b5e72638b71daf753b89808cd3c9ee","src/sync/tests/notify.rs":"d43bac90fa8a5dae3fad76d33f622e55767205fef2b4e8667f0be84d838a9146","src/sync/tests/semaphore_batch.rs":"0046207b0711f4d6cca228a3abb6523fcb5a1e6642255db4d469cb5246c86f7a","src/sync/watch.rs":"1b5fb2ef0108c4e8a9393943d50fe3b1aeb1ff3374c5d59ddf6524d001a20565","src/task/blocking.rs":"34ed9c27dac697ea9911cf5818be7589b3fa0dc9e2594ee1e3a06ea16c34f41c","src/task/builder.rs":"ec7f53b165ec941e2066ab01d00f68a2ea23074b85a894204a4ceda5ca76c21d","src/task/join_set.rs":"56afa4dbf1a618839da837090618cf262b629495776635d65d6db969ea70a8cc","src/task/local.rs":"6698564a6f3f7991ae17cc5d11e6bc0311cdaa9396caccfaead53a5116151e27","src/task/mod.rs":"e3d51a6f3be1ee1ae75b7b05bada2912893189474e2cbf5f09d91c64f5e18f3c","src/task/spawn.rs":"c315a3cdf3d8e493f85a3693a65478619a75a86c7e563cfa6820e93bf44850c0","src/task/task_local.rs":"5ea65d92a4766760807b1729b61e92fa34d280deebebc19fe486dcc18341d03e","src/task/unconstrained.rs":"a39131550205cbc0b0ad5e4cdc1f3326ab243774a31fc986cc4ab0f27f356459","src/task/yield_now.rs":"2734c883ebd307ffb6e1b43c336f3537be96074886de3d362b1b7a3e1cc67120","src/time/clock.rs":"5a653de458a61c1c6343167b5fb30dafdb5fcf29d1103869a03cbc228d6b15c2","src/time/driver/entry.rs":"bd073d1392fce2f128206957c1b09fcf571d95e104bdc39423d5267d4362d492","src/time/driver/handle.rs":"655393617dc953ffe08e34e7662b20c64339f4cf96092ac763beff1d8b06f4aa","src/time/driver/mod.rs":"a8f2dfb5aa31df491310fcf213715b782f5bfd14034565e345fb5f2bfc7c70dd","src/time/driver/sleep.rs":"52644c616131d1ee617a8c2aa41ab7b5a30433d960a1d808e551a1d34fcdaa58","src/time/driver/tests/mod.rs":"bc6b7a8d7ea5002f82e77bb1d848ae390c56e293e6ae271e8eee59ec6a8d1acf","src/time/driver/wheel/level.rs":"6319e3537ab9470ff3404a3
043ce4404b478728a4b48f884dc9db11c34ce4867","src/time/driver/wheel/mod.rs":"138edd95c6a095d2edb53754bbc40bc719ae3a8bb10b4d4fa2daf67c27f6a508","src/time/driver/wheel/stack.rs":"3adb5316b814656d6b271c85b99ecf2f23431e966ab4cfff26ba999ebdd8915e","src/time/error.rs":"2ca1e032254ee67087aa15255d9434067c65ef84d143cbd01280f037bea8c19c","src/time/instant.rs":"056bbebc2f9e3d8572c0a1ab48610613415b34db4b2e6494787d14834d4f06ce","src/time/interval.rs":"ef703ae199aab4fc6ae719a0db830ec57a08b8245a1dc29043adb16c02e9a30f","src/time/mod.rs":"e1602cbfa9a93479b580fb1e205751e0551a0233140390dcfe5177b2cca252ee","src/time/tests/mod.rs":"d5a5971423d84c60593baa494e71bec40655266afcd532545d70a680fc014cbc","src/time/tests/test_sleep.rs":"78c97ec7816ef02b89749b0c0384e2bb2f34a3e5f5daa1480729fee91e69bec7","src/time/timeout.rs":"43adefcd4bbd41cd261996007e5667747e4ab0486aa62bc7cd9c78e44e797712","src/util/atomic_cell.rs":"6e156b730f69c698cece358ff5b206e8dd88083230fd49b2fc4a66a539b92553","src/util/bit.rs":"ad3117c9a8b1e312114a72e9b315c37137af61da51ea458832d46d2c1b88879b","src/util/error.rs":"d13d6f901192c38f4f3656ff0f2fe81b82d37ffc2a10644ea9ac4c2b0d2119b2","src/util/idle_notified_set.rs":"9c6c9bff4a2c66c83bd9a70800a29772aff688286183a36aebb1da2303e65d0a","src/util/linked_list.rs":"824ae1872cc389bbf95ad746d46229051ddf90db320751e3531c0fbe4029a2e5","src/util/mod.rs":"3462a9f130045d2fda886efaaf21e664b1e8e31111f6cdfc1d4cd72d761bee05","src/util/pad.rs":"5dc99dbb3d3d16fecd6228fd2e2f67e5301b2d426e6149f79f93c1af1b4d1d90","src/util/rand.rs":"85d46431198a8c07d08c969e3556245f66ab7062552e3717d100c6d6769408f8","src/util/slab.rs":"7821a370afb5449c7347c54456f18087a7d00a1b59984a1ca2e2aa8250eafb2b","src/util/sync_wrapper.rs":"8f1ab76280171c33c0bafaec9c1cb1d48cfb0d19a1ab2b768cdf7d7e40f07d00","src/util/trace.rs":"f00f5e3c7e603ee7d700b40994976e8798440fc6bb4463bd2b89abddd436d048","src/util/try_lock.rs":"c4ee49e1751ee0a7df1a8cbd4f8d36ea1d7355e3ac584fdb8697a94cd7a7a8f8","src/util/vec_deque_cell.rs":"ff7d49cc4f660f55cb6ece97d1660bc5c36dee1eeb42643187ded07a33a0a6f0","src/util/wake.rs":"27046cb8116d4aef4b9296024d1a9b822121c0813b8390acca45878c9a13c85e","src/util/wake_list.rs":"c3443e695fd0b5c41c06d66cab96549064e9b645043559d01f82f365dcc4ff6a","tests/_require_full.rs":"d99eb68c2efd1e77f66689f805d89bb25f74e7aa589444ef7da71ba87dd1595a","tests/async_send_sync.rs":"de9c6aaab8a806f3a334489bde421c3dfc1d7186abd8545d0affee3161c8e5ab","tests/buffered.rs":"3ca857823d8073fecd4175dcb886558a3c6f3aec81803a90c5f786fc3edb4a96","tests/fs.rs":"b4902aaff2c28ef4d2676462381b04559fb4f7cdc0ecf46c46bccbb6276feb5d","tests/fs_copy.rs":"83448b19bdc332ec315024d4903b0a2ae81221895725a8b750025b47a43b0e79","tests/fs_dir.rs":"d9a837aacc072620c621e7e9799261b44873feea51cc6d25770bd3ff93abac00","tests/fs_file.rs":"d5ce8931976a6909eb656f90a3f9721a9788fa1e4965d7400bf196372925d1cc","tests/fs_link.rs":"01689c5c69aaa33543399164e2036ed96ae6e09b006c8dbe8af59df8c63df47e","tests/io_async_fd.rs":"ed56435564501e1cadd284f180dddfbd747df72e74c4b7b7a77bc166383bab04","tests/io_async_read.rs":"a590efe9bb01986f067118640a3b55f6760186e554f8e8f2404231d4e96f61b9","tests/io_buf_reader.rs":"f5a322dea6fe9f40c18a085a865919c1bbfe8653203b37d1e18b77d259c6211d","tests/io_buf_writer.rs":"4327940f81c0591e5c7b4a2583af1d42169c9427bcdeb88327d9a351f02d43fb","tests/io_chain.rs":"f5d3ddc9f6e8152ceb08b5dda2ca3168b174f1f67ff28a4c5983bcbad69d8af6","tests/io_copy.rs":"0683dee400710c1696a6634ecee64c39e7027344e66bfdd6b2a78de8ca913555","tests/io_copy_bidirectional.rs":"8ec51c37395f273de8ace88e7203ce78af2c4acf00b33fd968239a644d28a8cc","tests
/io_driver.rs":"76db3259c69f66ac07e5b8bcdb1e2e46d42e06e202831d8ccbee835b2dfc9714","tests/io_driver_drop.rs":"31e7002a20ab65b6a91904b4784047bdf17b8cfa134edef04b394430a43573eb","tests/io_fill_buf.rs":"a520725811d266e8833dbfb62999d4a32ad8aefc6403e12a51f8b0fd62530436","tests/io_lines.rs":"f5b1599ffff44819e269519ff0a08635ea1c5d7c541293e63ee33d98f25f0e3b","tests/io_mem_stream.rs":"24be85e8d95a896f0398459d140df1de24acbb8e70132d32893bf7fc0c24f1ed","tests/io_poll_aio.rs":"165f80ebc81e8ccb4d335c9b9a89d960f097de9b17e92bc964effa3c76ce5f98","tests/io_read.rs":"528a2146495c53b271af460c0dcba28d54cc71821811002a7f8de213b4cbb385","tests/io_read_buf.rs":"3c1a7820fc5e486fe34c6fb61b3e1bf8d18587790a4897a732f325bdd438721d","tests/io_read_exact.rs":"b6387dbeb0baceb7a1f74a9a3a8b4a654894465368be27c3bbf4352b79fc4314","tests/io_read_line.rs":"8296624b4f5e162c79024f3beab2f561f4195a244cfd4c53e4d06282f56a31bf","tests/io_read_to_end.rs":"b5478431bf61dd66cedeb8e0ef588ed8ecd6d9e0feea3655512019b4abe9d451","tests/io_read_to_string.rs":"c9ebfee5cb262d822119c2881ea1cc0c73598b13c517c297663e35bb120a089d","tests/io_read_until.rs":"b6c0df9e4852766910ec68affcd92fbfbc280018b7f9c16cf5f4830f9b8389f0","tests/io_split.rs":"b1253c9f3e733e6bd1b6e7e4a1515606902992e15e57c8dd2ef28ccfcb7618d5","tests/io_take.rs":"f44ac14d94493f2346f5b0b2e23f4147ed51ff466165a5eb63bd525bee6719df","tests/io_util_empty.rs":"32dff601a78e46e12339bf1577463c7ce1070d71d78a2fb33318112a111dc120","tests/io_write.rs":"98668a8c8feae0f85714df1dfecfcd94fba4ba347bdc3d8aaa4ea8b175055c69","tests/io_write_all.rs":"e171af1ecab45a439b384c3bae7198959c3f5e2e998967dbd9296760b52951b7","tests/io_write_all_buf.rs":"2c037f07ac464eaa4e0b87e4e4968b28a0f2f1b1d1e218546c9d5dac7a75d145","tests/io_write_buf.rs":"331d3b54c7664386bb87585f39910d1fe31bfbdfa012a2dc2120e535dcdac329","tests/io_write_int.rs":"3f4b50345f7d7d558e71ac7f2a8c1c4b7b771dad09fe2e1fbf9a17d4fb93c001","tests/join_handle_panic.rs":"97169881a5fb93ba044b86d7d81ec6d358be61e7e34e03af174fccaf8e0aa572","tests/macros_join.rs":"a963ce60e82f247bf034e275db5bf25a406b28872df52243eed8763671edafd3","tests/macros_pin.rs":"37569f60c199103b167a4979b61e8cee2fd63e39273d4c94eaebfa281a6f0556","tests/macros_select.rs":"e9ec258ded92767961cdf42ec2f19feb98b2acc3e48023e758cd5a54ed8ea879","tests/macros_test.rs":"748b157b3be8c5bcf49ce28c7263dd8df31c59548951f8e0654e4cee8e3c36e0","tests/macros_try_join.rs":"faec183aded3ad042abdc23a08d7147461c9ec47c08269c6742bb1d094ea2e04","tests/named_pipe.rs":"fba6c0d454a78ff938056d1573f07ebcf1844de386f7331c6750681ce16feeca","tests/net_bind_resource.rs":"3abdf9457ebc9f8262c03fa5834f1ceb6312d4a1573b6bdd4e2f584e3cf76b66","tests/net_lookup_host.rs":"436d541bff6a6775244f9989694ef4b40457c44d847fd2514e8915516a711113","tests/no_rt.rs":"eb19939398940c56f5bf27f9da66e29894f9b2957bf724828767b52eaddc1c6d","tests/process_arg0.rs":"785d801cf281230e3208512820e7d71b4d8362b8cf33fc72235da370e10983a8","tests/process_issue_2174.rs":"66c56d8dfda4e1723928da713dddea8d15774d6730398effadf0ec28f4c6f1e1","tests/process_issue_42.rs":"26043f8246b00046137551f7a9f638652c70f527f10b4d91e4286643120ca41d","tests/process_kill_on_drop.rs":"56d908ccead7fd23d1f73976524cbd0b0f05806d342818095f2c6803469cc3ad","tests/process_raw_handle.rs":"54498de544b61e95d75bf6e92779fbd9b805a434ffcd1ea18496a1c5cd7c7522","tests/process_smoke.rs":"3554f4a8c8b76ec0a3b79f4ae0b5900edd34255dd3adc561e8e9a77f217b1956","tests/rt_basic.rs":"c43409b5c9300c3cf8b14521ede32355124637558143dc4f25ff02dfdb7de1e5","tests/rt_common.rs":"f09915824604a33c8e21098c84c648ec09cf4e26ccb3a8e43fda7f58df10f31b","
tests/rt_handle_block_on.rs":"76b49a32abb7ed59c16977a119cb41ee7459221510eb919cdebdbde378db95f2","tests/rt_metrics.rs":"fa0b32715c2749380924469c125a97b9950a0678c524b3dcf241f6903939dca7","tests/rt_threaded.rs":"921499dd76186e70f1a88a546327398e1979c6237143f9f5090565f6289e0218","tests/signal_ctrl_c.rs":"9b53065781b37f3db5f7c67938239b0f3b0ebbc5938c14a5b730ad7ec07415d2","tests/signal_drop_recv.rs":"d1ec97213d9c6fd9fb25ea8c2b015c9e9ee1a62fe0853fc558bc8801e5a3a841","tests/signal_drop_rt.rs":"f968c1154262a4427b5aad2d9fb36d3b7d47084312d0b5527a8eb3d589381d8b","tests/signal_drop_signal.rs":"041940550863250f359630dc67ef133874d809ddaf0a6c1238cee1565a19efec","tests/signal_multi_rt.rs":"a1c50c25f4707fda7665da61b3317dd61fc32c63c61db2bbdb56065bd9c591ce","tests/signal_no_rt.rs":"99714bf488a26b6b394d93e61639c4b6807f9d756c8d5836f31111a30d42609b","tests/signal_notify_both.rs":"bf0b9def20f530d146ee865305833d8e9bee07a0515e66573d7ff30e2c631123","tests/signal_twice.rs":"bce33093eed151955d13c334d6d8a5bc5ca67cf5b37c246e435a24c15bc166a0","tests/signal_usr1.rs":"86ad07594b09d35e71011d1e12a1fa2c477bfbc4a2a36df1421b6594a0930074","tests/support/io_vec.rs":"9b3001e120138ead4a63720019c669ff00f8455a74dea2fb231633b3b58c9b09","tests/support/mpsc_stream.rs":"00d48122fa2ccbf1fe0b110ce3cf22590eda54b3ddec0134b1f9376eb1169645","tests/support/signal.rs":"83531afa2e8e71cfd90cd4e1fc821490ffa824f0f9f0c9c4a027c08fed6b8712","tests/sync_barrier.rs":"37401fe3de3010c2767ce0b1105c1ce991bd9c59ddf409403faa8a4dcdf30ffd","tests/sync_broadcast.rs":"0e13d330ce8932205998df5c7f4b0d613f88bd37b97d491083224d7a4a1d6fac","tests/sync_errors.rs":"438469440a1910457faadf9386757cae6e4f14807aaa513c930a8716ff68bc70","tests/sync_mpsc.rs":"989798056a9031d35cf01c6dec051b2cfe21b98b2e8f9edbf315853c2b057f66","tests/sync_mutex.rs":"e9ebbf1261204f0eb7ccf013e7455a1e11df8ebbef6105967e342559d4197a01","tests/sync_mutex_owned.rs":"911464f1aec2d5916fdde3746e9534705d1232f4161d4be56d2673d9d360b6cf","tests/sync_notify.rs":"e4689c9f735643b5993014df051db7057fd140b39975755a7434a64aa88f3ac0","tests/sync_once_cell.rs":"639d55248775f2cba0709919a3b2ff4317e70f081e09444d0a67791d65dfb3fa","tests/sync_oneshot.rs":"41e6966f3741ed848e71e973e65e630d549c6f9568f02feba737fbe3dea353bd","tests/sync_rwlock.rs":"1ddc830bfb961be4b05b1af549dacfad78ed2ae84eb2d9d12f8a201bee6cf929","tests/sync_semaphore.rs":"9796ca2c67c56676b4dc01055a55765f81f00e164b5af546ce2f0134af08cd4c","tests/sync_semaphore_owned.rs":"6d0a1067fdd6350ab1778e346fc105e9d17d9f40bba72700245f9184f0f6b5a0","tests/sync_watch.rs":"1d41976a05392901399f3d16c92544e835bc8f6c2ed2b5db2e42a8270e5fff83","tests/task_abort.rs":"a12920188752719d557a77c96586b1ef5917c424eb2b1237f90e017f01d51e25","tests/task_blocking.rs":"241574dfc0793008f80d64c3f6da64842fd1d46cc88964b59380e6796e0b116b","tests/task_builder.rs":"9f0212ae4d7071f2e491effca992ed8884b2ef2e3493a6d7ee3fa659cc926338","tests/task_join_set.rs":"e362f7d17ce4373db59b01e0c590e8ed231e4099220a4838039a9bc1d4a7d612","tests/task_local.rs":"eb6fbfc6ba024885863dd5d633611f21bffdd2f96a7fbb596598dfc0c151bf9d","tests/task_local_set.rs":"dfeb90355de7ffd59aa567bbae5539f859b55d64255e6c71785118364c9da8d3","tests/tcp_accept.rs":"a17b374e8bf590633d5c3ba434ca0a8545ad96d8c01e140bb8418be968bd8cc1","tests/tcp_connect.rs":"3e457719e2ecc877090e039465d492706c9d0318f483c70fc325866ef84108f5","tests/tcp_echo.rs":"68d67368f1d9f9dffbaa9fb1a0ef71e4f3700a76aa5a2c3714c8cf7aedb0f1bc","tests/tcp_into_split.rs":"9636970f8450139462d7b88a4994ffb6c4d77c92f89750ad9a479ee70b0ed5ce","tests/tcp_into_std.rs":"9fab18181788d46e787e87fcdbbb2d1a24c
1a60bbe8bcb1bb1e63cf43326c643","tests/tcp_peek.rs":"ea904d05f9684e6108a698bdbbd856c9849c1a51eb334cf5bd45ef74c8fe585c","tests/tcp_shutdown.rs":"68f3a5b9d166064f63819a53bd07bd1c0988c1c84832ac4a35ac9b1ad6f447f4","tests/tcp_socket.rs":"bbc3f467f6d2c13e975d28198d9e186f6d4a46dd893049084845ee5b4d468095","tests/tcp_split.rs":"e967b01bb90e2081e5e08e8cfd619cbaf0d1dcd08c4e77dbd5bae893698cae85","tests/tcp_stream.rs":"0b597cfb53d00e2af278b64d74b7de6a1436b36b3a9a4966de3976c52d6f37e1","tests/test_clock.rs":"d5c9bf7bb5d926e2b29b43e47b5bb051b0f761bfe44d5fef349ed442ea7b416f","tests/time_interval.rs":"cd1e459be096c33f6d2b25030326dcdbff3eca5f83ac514d236ea95ad5d12283","tests/time_pause.rs":"aadbea51adba2f437bffb3a01f444e634f629651b1bdd89134354e5ae4782ba8","tests/time_rt.rs":"2981f4a4c276b306c4d4bf5684bafbaffb0789ce7bf8d2db3556e5208bab52df","tests/time_sleep.rs":"61556bc4882dfa111bc8a2eab566046d547c2f886f58be8b16d0d9523d9c5f7f","tests/time_timeout.rs":"ba53f80e8d3be2cd8c72388ca6b2e9d444504861febcd077cae5d7a02dc59425","tests/udp.rs":"ad6dfeda7a4721b7bd00692a05c9d668013c7f5e86714e6ed1005b99eacabaab","tests/uds_cred.rs":"146c6e9bdbb82a268f178b03575f94a40db81ef67a740dd16ead5b9e4a447f1b","tests/uds_datagram.rs":"06964ba9c88e49296da2e27dc8ae0d2bc85a55875a31f3875cfcf11f55ae6851","tests/uds_split.rs":"79d54d6ce35e5d15138299091871ecbdb6492ae6863fe406021fd7359f1ed7fd","tests/uds_stream.rs":"d672ed046b6e6ad9e2fd118be91c788680972e3f5271e91857f29c2e25d1b9d8","tests/unwindsafe.rs":"dab3dd646625a878ce979cb54d1a68672444d105af3337b06b5dbb169283521b"},"package":"2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"980b8cc5fbf63beeab6b72d6d7f7987c5a30df469e705a15c5e997604a1f2661","Cargo.toml":"c370efe20fd3c0f36e1ed75307fec12b959ab8bd647c186b4e9d64d55c4bc204","LICENSE":"697fc7385b1b0593f77d00db6e3ae8c146c2ccef505c4f09327bbedf952bfe35","README.md":"fcb522a61d41bfa31b3ee897b36058a8ee7dd957fcc7206ddedd4aee2c1054e3","docs/reactor-refactor.md":"f7dcc42be8ae33446be399add73f234fc217a5d67478cfcac65601058ab50d47","src/blocking.rs":"8e62b2cdc512fedbca4b4c4f983629af035afea4ee7e918bb1a3e9851c8e034e","src/coop.rs":"ef7e148a1e2023657f9e4e2c927760a28b3e7d0684e8e5ccaebcebe90d7033e6","src/doc/mod.rs":"313fe6b3eb0e0e7a9d9cb070cc18e8749f5dd44724b651a90af46acf02a4f7af","src/doc/os.rs":"4af4a78a0af962f94e5e8f67858350e4fedb3a5f990db277a0aa8bf4f7b923b1","src/doc/winapi.rs":"f8b16da97e4822116be6a59ef0d9549a857861cecd584353a7fb02df173dde9c","src/fs/canonicalize.rs":"93c64b72abdca17877d6ab61d50a43765d6aef9e0a9f7aaf41b6b0b7d9a8a380","src/fs/copy.rs":"262180fadc66e5ac2bf1e8389628fdd039f14788e66a4f8b10e10633e7310f20","src/fs/create_dir.rs":"233cbab2579a787614aeee88845a57f1578e9c2e064a3456c799f61430e911ad","src/fs/create_dir_all.rs":"56081d541caadca0fc59e84d55e78e702fe9373679598016224ad0b072b189a7","src/fs/dir_builder.rs":"b5b21229c7bf15e2074afab1accfbc392f3c69e546c7b652cfdc8e90d5a4a301","src/fs/file.rs":"193078a2addd11de302024959c79984f13933e5e420098641fc71327bdd90d78","src/fs/file/tests.rs":"b00dfdb1cf5347e773d268447b0cb1ae4aafd036abb7703ba0d6bf404e155cf9","src/fs/hard_link.rs":"98cccbbb3719baee11c232e79723ab1cb3d6c8056bddb109c4990fe2c236c1fb","src/fs/metadata.rs":"782a1a5dbc2cd6c40e928579fbfcf39e5f1de28def78781590c0280acdf02960","src/fs/mocks.rs":"d380cd15d2644ed240f66a2acc7168b5a51f1357a65eb764cfadc98c42676013","src/fs/mod.rs":"eced6fd2a09a6e458556caafbd444753a3577a5dbbe11688f31c0e1e122879c2","src/fs/open_options.rs":"e1f78d96005082e6886de6fc8e6545cdb72e4e4fd444549b53c5bd7addd8510a","src/fs/open_options/mock_open_options.rs":"f08d384602bbd165eed4cf8811a19512d62c4d9573457f8d22c78c77b1091629","src/fs/read.rs":"055ae8b6ae96ebae2d05f8780e7592bb587a742506c6df8ee8b380fc7d2820ef","src/fs/read_dir.rs":"76a90e7a465da2e977392b65265dfa766c19d8b25967a278a5ac20f6a0543243","src/fs/read_link.rs":"93c104a21253372fef7056ab82e6065b0a1a8fc9be8b7329dfd5a8dd07c618b0","src/fs/read_to_string.rs":"9e5b2d476a6084e32a92c5421a8abc9d4f335f4ec677beec4bf8bfa109d7d106","src/fs/remove_dir.rs":"96475771e9c52678768288d8df6814216e0801bebc848481597ad34e829a5854","src/fs/remove_dir_all.rs":"b85abd05c7ab64ee8dc6cf5663a11e713aa51b357759ef660ef3cae3365ccc42","src/fs/remove_file.rs":"1cdf8bf16b3a164c594dac8773b7d1f9ebb28de169343184d34d6aac3b3a7eaa","src/fs/rename.rs":"a97875e92626fa46e23fece7b8698c9c4cea2bae8f1be8726f30ae6fe80ae0c7","src/fs/set_permissions.rs":"8adccafa475dcfc1bc3989af73374d90683c1be4953ef812e5fd606f968d7b7a","src/fs/symlink.rs":"32cf3e906531d30ebe6d8be7ee3bfe049949759b566015b56d0851f51abcff50","src/fs/symlink_dir.rs":"5fbd05365555ba7942ffc1c2dfddf201ddad2cf9b005be2ea99849a473fe982b","src/fs/symlink_file.rs":"a1170fd40a000dc449de972267f579a0d14f50dbb39466f985f183fdcd1d3438","src/fs/symlink_metadata.rs":"f5ce1e05f137da995e3e0d9582bae0a5f7ef4251285c64e912b0eedbb068b395","src/fs/write.rs":"1ffb734d31748bd879ad398b0fe99bdec569782b42677022957db2cae95c4d2d","src/future/block_on.rs":"ef7fd744d2727f169b6d8997e2202af88a256bf41a894c93a4490ade65fa0e57","src/future/maybe_done.rs":"9042619f2a907e81763ac6082c080faa28af5d571dd49e82b60f3f14d58598f3","src/future/mod.rs":"6f28857347c50cd6467f1ca7d1b1028b633de7d142c3f3ca710a8693d1e839f9","src/
future/poll_fn.rs":"13c8e4b1e26fea2b2b86a0184d7cfee62b0884312c152a5e281faf20958a8427","src/future/trace.rs":"c42712a8d372922eba7e05cd21382fe5df5eec02cbcc870062100b59ab99654f","src/future/try_join.rs":"0ea5a069b17a34bbc091acbd74b9d51794a55a85dfa63fe2404d5ee91a4f0038","src/io/async_buf_read.rs":"b37caa8f6c974b3c97327c635218803e573c531d4197950840549aa794357c99","src/io/async_fd.rs":"b3eb76f964bed4ec0b3d20059a038e178c13e462f25860b824d7b754109ede2f","src/io/async_read.rs":"f52c8d2f4a283c0dc8d06dc974484749973125b0b691bc0c3d100972ac67cb92","src/io/async_seek.rs":"a9a0df389ff2be3d79208ec475fcfede46a86f6ea0b822b1a4ce8273ec714b0b","src/io/async_write.rs":"198ed6a475b6f093fd2ff15e618d816d7e33cb8bc28d2e2299533e5df0bd78d6","src/io/blocking.rs":"d6613950dc10f17287ff396d6acc7f386bed1ffaaeb71dcbb10ad4ffcfb29ed7","src/io/bsd/poll_aio.rs":"893aab3d616e33fbb6ea61e6d590e80d2871e676c0647a58041b16e4698ca34e","src/io/driver/interest.rs":"726c813211a8606f68e58a2693cccdf3788f252aa1e9700af088ba756ba36c1d","src/io/driver/metrics.rs":"e711c65da155ef73e567125c0c9db33b2f511982a0c0a6b4f3b37c2d87c791a6","src/io/driver/mod.rs":"cfd8bf97f4213209c32f9e453a1be28c703bd1f5b2f3d4e358633ab2fb082dcd","src/io/driver/platform.rs":"023acd3f2703d241b3e91ab6e4d4c0bc5ccc3451655fffd9f37938224f915494","src/io/driver/ready.rs":"e4bac7c50bf7c2d29b3516e9184c2a46cc4631366ae940c41cc5320bb35a9250","src/io/driver/registration.rs":"7f34a9a2aef1ea6efb92e235111f8bc5f863cd48f04f4af5aba57d656f5b3cf8","src/io/driver/scheduled_io.rs":"d8da8185d1c72af922746737769a612457f053eb529a465d242de641fde1c731","src/io/mod.rs":"1d32445c95577065827eb98ef842bf4c9be842da376ef95c565c0a65820c55f6","src/io/poll_evented.rs":"e3b9765a8630b497be71acfbee58fb00174399e4755e04859d418ed82350094b","src/io/read_buf.rs":"8a79bdd31b7d77dd585db87dde45f4208f7f131a6614b51f5a776d512b435c2c","src/io/seek.rs":"e9e346fc926c3360601b80a8319a25fd0567dd6f77fab666694e9787deaef633","src/io/split.rs":"2c6982598a86620b76826ba6065953e9fc78a0434652fe97d82d79f7bbcb66b3","src/io/stderr.rs":"8f4d9fc5e596180171b68919bde951c49d134446d30af06dbbbe413ff2882ff5","src/io/stdin.rs":"5c15de00a7fe2ffee2f48f2ecb2a4fa765f3de891f8cdbc240e2988db7cc0d13","src/io/stdio_common.rs":"b76cf63fadb9c92710c7610af178f26b5b0d855e5495b061d920223c0f8bfb58","src/io/stdout.rs":"2c1b11ae547b0eec06e2b21039655a2acf34125b789548c7bd362babf5b05cd2","src/io/util/async_buf_read_ext.rs":"7f7fde910ecd9e526bb85882a318f457dedf6ccc2cdbc693112562186dfb78de","src/io/util/async_read_ext.rs":"f70825f0be7ebecce5473c53c716cc57f1123e5818f7b917444c3892639a5e2c","src/io/util/async_seek_ext.rs":"804ea19c98a28aacedc38852f692f59c52f1327807a0620c1740182ac6b01e02","src/io/util/async_write_ext.rs":"6256ed8320aa225fbe9ba159cdc20b08e1e22bcab4c090fdb1dca1cdbc2d97b4","src/io/util/buf_reader.rs":"670a58f404e5689daf1f2b3070b0b9e95fef96ad19f0e8487f294e8a2afe558d","src/io/util/buf_stream.rs":"2246fe71b707c15d7168c5da5ee158cec6e854d4fd11b685531c16a9c3cf2c6a","src/io/util/buf_writer.rs":"f9c3e018c9f9177fb6d910096503caee727bebd3c36f5f67dca2c4c55044408a","src/io/util/chain.rs":"5cd8df2cc7bbcd18ca2336a78507fa8009c0a9e595f81730a8c16cadb8f731a2","src/io/util/copy.rs":"34f164f2169caa58fc9c43415108e61c662aece2e2f3010470d89b3fc3e2aedd","src/io/util/copy_bidirectional.rs":"0f72d957fa2d1b3517224e34c65f06016f5e04857d45b01c368398fb95e555f5","src/io/util/copy_buf.rs":"b029d0ee8da5301a06722d425828418027234a8b111a209fa49ead161263aa8e","src/io/util/empty.rs":"48f23da224ff6c5d3803b24e4d5e6a18c75b31344a4570081bd0b3c3e0244fa6","src/io/util/fill_buf.rs":"223725d828071e923f25d2d49a0f6e47
0c411a6d9ba225700f2dd8d5793601bb","src/io/util/flush.rs":"fe3b4ff226e294843b8cbea9dc4e02d581582b78ddaafce137c96e290699c718","src/io/util/lines.rs":"1d9f9b99567111c911e72a4caa2abb19b277f2cdd0ca3268ac5ca6df5276259f","src/io/util/mem.rs":"222519afeb028305f7d7165e8ded58aceeabc5935b223ae73bb2413703565ddd","src/io/util/mod.rs":"6a9012d78fe2bed8240e7a628e9421cbef45433551522624065fdcbb329f3594","src/io/util/read.rs":"01c1113b6171c83ba2a0ad774703c3139b0ff11f47953f1b50861334d44f86ec","src/io/util/read_buf.rs":"a87be2d115c09a6782ec8cadeafc92fb1fbe534580e71540087c3298a03bfca2","src/io/util/read_exact.rs":"4a8650fd7a885963a0fef2bec24c17046c5624e4dd7fe229ab3f33c4a92fc66c","src/io/util/read_int.rs":"49da230796335df584832cd7deb8370b4d1e0350d743046389a9d9ae17dbd94f","src/io/util/read_line.rs":"9cdb2d778b81bc50098a6851981ed9f541bd0c7896c0763b811971b5a598b7e8","src/io/util/read_to_end.rs":"7aca8032ee911467c24ce435b7bcf023c72ca6f19c2663269ff50d31913d6e01","src/io/util/read_to_string.rs":"fafb5463b013cc8f76def3a505dbebd179afc95bde0e2ca9388e428265788924","src/io/util/read_until.rs":"b2a2a7c434439fd2c9269494e59edbe1f75eb45449dd6be384663a6ceaf137ac","src/io/util/repeat.rs":"d4effcd81338831eb373cf2db972a99218b8379b91066940a732edcf4524c7c2","src/io/util/shutdown.rs":"971454342b4636fbd68e123d59d87017d81f72afb410c385846069b11def8efe","src/io/util/sink.rs":"0dcb794e48ca9b1c28e5f9f2051073ea0951a54c9c7dfc903ce9e5489d3d8cd7","src/io/util/split.rs":"03a59adccda29608886e38a1f484fbd4d6a6019180c4cfa851372d250796aa5a","src/io/util/take.rs":"ef080ce27d23cc23b9f99fe14ddd56349a3cb1442aba18f8405c30d20017b074","src/io/util/vec_with_initialized.rs":"413d30e6010b0300901f97ab60ceef8d3bdd24c29a79dad4917094934fe28efa","src/io/util/write.rs":"20d14ee545ab1f67732915522e97808d1ddde13d151505c1289b596be519f7c8","src/io/util/write_all.rs":"906ff3fb24c6a979b104598f9a8229421bcaf2a4218c28069504b34a218241f6","src/io/util/write_all_buf.rs":"5911bf673ef89097938f4e2e38d9012865b28a0ce5ebb217ebe0e2507de6c1e3","src/io/util/write_buf.rs":"ab51d6174de24cbb729ce77dbaeea27e16059b8253e4830d8243ec5f08a08a8c","src/io/util/write_int.rs":"f321e69b0c7c01728b079e9fdeedb96c26475667e8b259d0c5f4a83d060990d1","src/io/util/write_vectored.rs":"7a335a9f796daa048fa9708dc44d32d2567b36461a6d88f07893eb31f304b69d","src/lib.rs":"779367d062d41439499a0c0707b5d6f9719ed533b7a23feb3344f6e18f7e6507","src/loom/mocked.rs":"6db5ed42cd469ac1f98d04c535e59aea174d6c06aed5f9f0c5b5254dc6e476b9","src/loom/mod.rs":"b14b9333a7a21bd125a4ae82f01e5ea9c9ed2f78d7d1ad49a13d9b176f1fe8ab","src/loom/std/atomic_ptr.rs":"16d7e6f841697020aa315a303f26cca714c35a96c4912ae45b90ad3ab0715e28","src/loom/std/atomic_u16.rs":"8793a4927367270305a6a3a68423ccc838cead0960ab1a7cb345681182916c14","src/loom/std/atomic_u32.rs":"39889c3295f5a201ecbd4ce3f5f942d88931fa9988071f771935dfd0c6c3443c","src/loom/std/atomic_u64.rs":"9d5ab70da2a45c30270693b853b896b2c7778c03912e6adc23c5d4c0b216b766","src/loom/std/atomic_u8.rs":"98f6baee74059eea2fc950dabc273a2fcb3518950015fb3a5acb3dbc58ffac03","src/loom/std/atomic_usize.rs":"ce7e840ac029a91adb90d7834c2bec3a8ef5fcf0b311de0bb6900c0be199301f","src/loom/std/mod.rs":"0503d16f472852d2fdb1bf9edced988a264ec933e9ba678de65f9d72b5e88d6c","src/loom/std/mutex.rs":"83938246987904d2cf0fbd76566170e62ab89a2e10dc309f8aa4e149cdaba74e","src/loom/std/parking_lot.rs":"89c0b87687dcfe5b200a82f78ab9517b40c42f07da4b9ced59067b76a8c391f6","src/loom/std/unsafe_cell.rs":"05e2b4d8e78b0435650c6da6239ce8c53b4de2573e64ba098354b049f23429ec","src/macros/cfg.rs":"00da37040b630916b8b2eb17994adef8bc7569585e8395e891cf561c6db1
73a3","src/macros/join.rs":"1596f8d9147f0686bb91029d68f1cabd3ea11ded6c7e4b93f3f7a473a579bb71","src/macros/loom.rs":"80d2e4af9fc50d0bda1b20b95f8873b2f59c3c0e70f2e812a6207855df76204e","src/macros/mod.rs":"913c824be0242590352d952a2ec445ef1d3d77e1512cdea4e2be7f4956cb9bf5","src/macros/pin.rs":"294e5644061e41801dcee5494b7334439e09af0b6219ce164090feb624864631","src/macros/ready.rs":"6efd4c866c4718c3a9a7b5564b435e2d13e9c1ae91fd98b1313d5e7c182942d6","src/macros/scoped_tls.rs":"d598cf0c6e57d91d56432898ebab741aec3fa07b08e0ab5f91dd151127f4ae3e","src/macros/select.rs":"97e1516bcfa7f9dd22ac24dcdc4a205ca65947495a77c4a8656fbf2c985d582b","src/macros/support.rs":"0cf0789ba2dc5a196ccbabba9ca9f1861a71284032b98dc7c372078c12c5d5ce","src/macros/thread_local.rs":"8602495ed102b63e3048a261eda7483dc9a24b15a74d7059c31635e8f45de19a","src/macros/trace.rs":"33befd4533a3b2b4b22e246033f2bea8930174a7da58adaa57dbf20931275bcd","src/macros/try_join.rs":"8959ac69395bf76a63c1e34a1a12bd930f5c3bbab939fa086abdabed1055586e","src/net/addr.rs":"7b5e9b1cab3f7b743e5071822b6378dfb26a876705be4b2e1f9a27952a0e4d5a","src/net/lookup_host.rs":"c7a21d735225e316253d822f6b11a17107e6a8db004f947a54d8bc12ec782baf","src/net/mod.rs":"b75d65aee9871a9b5ead894a01199d0af48f993e12a5e49041a91148739876bc","src/net/tcp/listener.rs":"c0f9d5e115da4127ce6792122e48d39dba1888928411241a3e4982ea68f38767","src/net/tcp/mod.rs":"d33297e086d7fcc6c46584782d7b24f65578bf9895844f1ec6cde95cc5b8902d","src/net/tcp/socket.rs":"fcf5fb69ebe6b65926d8b41126205b454d14c1a007f70a5686f3199972472ad8","src/net/tcp/split.rs":"e477c847baf20c5f0235a40720a60fd3564cab66bef37f48c2a17cdf95de96ad","src/net/tcp/split_owned.rs":"55f393899efa77a41571e64d96507a374c413050ee866144c8f57b3d2935b2d4","src/net/tcp/stream.rs":"cf3ac9e6325c83209d2ffc89440e1567bc70209234c54217e1c0602064003405","src/net/udp.rs":"536197c3efb7cfce4b8c93b3bbe79f821aa11e3f35b04483389a7f73aa6a251c","src/net/unix/datagram/mod.rs":"fc48924e5d1e551405b0708a2d122473cdafeeee802a5722f64b4cf41a1c01da","src/net/unix/datagram/socket.rs":"34ac9c85ac07c12a75f69e672eda6cb38cc6ab074e6f62df9508c84dac9932a0","src/net/unix/listener.rs":"eb9f0deb2f6f292b3224f5fc4c6aa12fd4e842e867e75ededcf4859276dc1b8a","src/net/unix/mod.rs":"2c18ab3fef385be4b1603cdd391f557f36a3188b8bebc3949647b27bbd6d84b4","src/net/unix/socketaddr.rs":"66bf18321a81baeb394a7094567632b113e44e12d2643109b6a97c89d919bf3a","src/net/unix/split.rs":"9f6c51cc59c8b2c6bd8e7b8f918486ccd8d200973e240c58ce26490569db2182","src/net/unix/split_owned.rs":"e0640e4fd2b6be95139f7f1aa014c149fc2dc834e7264382b7ff84750b93e10b","src/net/unix/stream.rs":"09bdd08a9d60bc59f7a3edce8a3bc346d110b487dc424c22715a81228e6919f6","src/net/unix/ucred.rs":"836fe68abe151abcaaf4d9bc92025a12030f1c2b7beb314cc453d0d88aa316d1","src/net/windows/mod.rs":"a1525f35c1acb92b15bec788a625b76acb42f9424f392074db697102ce79760d","src/net/windows/named_pipe.rs":"c75485247534dbce545b240b0aaf3f9d1896f4d2d4c817c847fad523ba70ad77","src/park/either.rs":"251c6255768445ca2bcfb037b1fcffa05e9ca0f4f31923ffcf341eb3cd794180","src/park/mod.rs":"0e8d343742e6e498c4f4d44a837d01da2e75012aada2117641937c56f85abc8f","src/park/thread.rs":"f5c4885e6c7e3f6e33e87774e79ef3bbef13fc7a4e7f9c5f27a86d74fec8f275","src/process/kill.rs":"2f98bd1bd28ab37bedc34ab7b737760407ab5315420538acbd18da31d2662d94","src/process/mod.rs":"f1cc072459efa44ea85e3fa4985e63524bfb3abc9e8606ca1093de624180b701","src/process/unix/driver.rs":"073baa2a7b1065745793a219cb7045907f339f71945943dcb3c52a2cfeb7ea45","src/process/unix/mod.rs":"ab58ab44dfd8ec1da5dbd064faad0ffdb10c2c70c56e741cb5726648dec12f4
e","src/process/unix/orphan.rs":"9927c3c918bead0b56b1a9256b8c8eba9f9f16f87f0c81866278b65bb96a1f72","src/process/unix/reap.rs":"62868319849b8482d8d927dcd00cc8a74b9af61fd47494b39bd41fe2f4dcf0b6","src/process/windows.rs":"feb605a16ea975886b38a66f30b3aad97f7e9ae73d35eb2ad45ccaae36d0e4e8","src/runtime/basic_scheduler.rs":"4c849b97e03b4af046c941a0fc58424022d3eb1d3911671a01ac987a4c6b5337","src/runtime/blocking/mod.rs":"660fe19c0ec9b0b8d96f125dfd9f2db5189f8bba9d011d6ef7b34e67c19bdd7e","src/runtime/blocking/pool.rs":"73e2468244213611a4fa66eb594eada9430d9735105e0c2f6886ad447d4d1b05","src/runtime/blocking/schedule.rs":"fef31b442b9e743a4e0f3d490cc320ecab17b475f2be7c995a57fd2af7b34b88","src/runtime/blocking/shutdown.rs":"964c67d028152e4daa3d424d90297f4589973114a527dd081c90cbc6c80f6fec","src/runtime/blocking/task.rs":"60b68f1dc13c84a64376f078a0eed4cd9e9f160e9ac0a069b2a8b11eb748eb5b","src/runtime/builder.rs":"8c1c0d95974b4a1061d2a2651d61794aa44a26163ac01ced0802d4aceb3d56b8","src/runtime/context.rs":"d7c0350fc0a70085de7b2801bcde6333fbb350e533f213d163900b1e05aab8d6","src/runtime/driver.rs":"763324ba05d5901acd060532551cc25c539ee9c1c9a5a87b72450fe74f2df394","src/runtime/enter.rs":"76d42510a6f0bf159746268c316071d44daebf7523347c032917f55031f7880f","src/runtime/handle.rs":"027f95c813ba537d17a5d0522801e45ad66315a78142b67e88a884da93ea8633","src/runtime/metrics/batch.rs":"fbe9c32fb1c5643b7cc3a2f563e2bd1582ea068a5c1215a151a50f1855c2719b","src/runtime/metrics/io.rs":"960f44573cd16e2ec43ed872f235ece09db9f4c87e217002ae0d180e20ce9838","src/runtime/metrics/mock.rs":"0daeb89a321a7922ff28432122ece55b818ba60469d63531e9a3b29eae110647","src/runtime/metrics/mod.rs":"81dcffd01a0969399eefb539af6b1961a64ab01fe35d4bbfd080d0077e1a62ba","src/runtime/metrics/runtime.rs":"ed350c297eda220b55fceff78a0da54b0ccba8b7c1a3df30d5e9629d401f42c2","src/runtime/metrics/scheduler.rs":"27ac9b6efc0d7bee874c270d5e6d46b8f2bc7a78e620b474a6b00034082842c0","src/runtime/metrics/worker.rs":"3077acc44d872a32a42fb60e6d4c140c1af94b5518664157623f9e45feb2420b","src/runtime/mod.rs":"b40b0865ac412ffbae4770e3b795739ef3603e6f23c5bf0af98364f1f432f4d9","src/runtime/spawner.rs":"53ffaba33ead638ef7311fc10b1aa87f9d4c7d88565d1be8ef3bd84e1464e1c5","src/runtime/task/abort.rs":"ebec41d621a263395c90cacceb2ff6176fa3347b2b95a673bab1e1bedfdfbe2c","src/runtime/task/core.rs":"76a8c906be48f9ec43ac7c880bbd4105bf395c0667f0277c01b2154fdc01bc47","src/runtime/task/error.rs":"15ccd341663e152a7408725ab2c7fe10a04bd7f4e60d46ee9cbca4c6d5ee103c","src/runtime/task/harness.rs":"1f1a09e492947691045b6c04bcd8c06ace6b31e51b446cc37fd366a1075f6eab","src/runtime/task/inject.rs":"34cd85d0d9d241ad1bc34d710bac708ee2f3fbe43bb68732123a30ed99ca2e36","src/runtime/task/join.rs":"9fe4705e07716d3472a6a23409605b4e4a2dfb221d322900d369bfe0f40ed780","src/runtime/task/list.rs":"e114e404b96b0da3a0c1f1202071f834cbb2c7fc826c0ce0ab3ebae96b62eeab","src/runtime/task/mod.rs":"bb6a13f754e58bbd89fd4ed5f7f2a64886fc0dd0d838859637d7036e4a91abcd","src/runtime/task/raw.rs":"a1f18fe669f7e826f1ab043cf0c53ea41c88c765a1b1aa40836545df43880d61","src/runtime/task/state.rs":"6b872d5c489daa68091f9b17be8dee645b5c960a1eba8df8852f27646202d20a","src/runtime/task/waker.rs":"f06c05fa37d1974218a44922edade90e0fe533e06e2bdd111fb3219e2393cfe7","src/runtime/tests/loom_basic_scheduler.rs":"9ec324867d9d123bf93c8f1c9feba8f9b57689122a1efa784adff425962b2d65","src/runtime/tests/loom_blocking.rs":"4110de132b975fbfff2172457ed0e75ab84e9e643eb8099770f86fa173e12215","src/runtime/tests/loom_join_set.rs":"c49bd310cf4dff3299fa019d62561019a62177d42f1fc3fd63152657
aa73404b","src/runtime/tests/loom_local.rs":"69cf7fa67da9e3efe3eee722791f811efb395dcf821780051e62842654de09e3","src/runtime/tests/loom_oneshot.rs":"cb0cb66e1014708a10a2236c2dbbac9441b6a6e36a49c419fa3a51af62b054ce","src/runtime/tests/loom_pool.rs":"30ea07a942c39bf1eff70cc6d7ae10cc5f0162c0389029ef0bf8b14ebaf4533d","src/runtime/tests/loom_queue.rs":"405ca9ff4eaed8fa33d378c5750762c77a2ecfdb98a23d87e041a423b09c0dc1","src/runtime/tests/loom_shutdown_join.rs":"2f48626eb0a4135d480aa5bf72e514c4abf39aa17711c96d3829f93c03758557","src/runtime/tests/mod.rs":"ad5565dae8f52cde322b1df1be1cc01ac56b1767dbd97bf26364548e1117e3bc","src/runtime/tests/queue.rs":"837b4015f432d22a981ce4172a5ebfe636f341fb03c0a0c7e115e91d45758298","src/runtime/tests/task.rs":"aef9eddf77d38a93beab65f3be9fbac913aa33936c995adb7aea843dca2347a8","src/runtime/tests/task_combinations.rs":"635bc4825113a062eb6fa46873068e6c49329a2810101302cfb5bb2595b903a0","src/runtime/thread_pool/idle.rs":"b048a6ed70ddfe08670b0f99535566be7eacafd75df9c3a9447ad14c835ca326","src/runtime/thread_pool/mod.rs":"92aea09b20856798e3da3445edcc0bb368af17d63ba0fe442d3450d77e8256ae","src/runtime/thread_pool/park.rs":"e1d66e859e402068e70580277b4b128b48bda145de77ddab90c55b8a19799083","src/runtime/thread_pool/queue.rs":"77cd1ce2bc789d3a53dbdfcacff8a928f6f470b232d2288a8ab84856a9a2f022","src/runtime/thread_pool/worker.rs":"994ece4c9b71f44eafed8f356ea217fe0651e50bb1555163fa3776c2a95709ad","src/signal/ctrl_c.rs":"3e1d98df851b9ea1418f607c260434e906ada4e3d6c14cdfa5c81a0c4d55cdd3","src/signal/mod.rs":"e326c839b0b8d549aff9e4e9204b99c544ab49f32ab2c5066fe3c7b8bc7767eb","src/signal/registry.rs":"04d6ac98cf9f4ae7921677def41082e13283fe854fca919eb8acfbb8240c0ed7","src/signal/reusable_box.rs":"3d3b710b1794d9f8f5463e9ca380ece60c426b58786a5cb5f40add627da01aeb","src/signal/unix.rs":"a1d53840a849e682fadd08f86a35f4f0561255e168d30d3ffa59960645f9fd90","src/signal/unix/driver.rs":"78ead5545500535c20e061b0e5809fcec15369286083ce9208a0a1fad771aa37","src/signal/windows.rs":"18680b96548ce1269615587b803186eda94521f9af8519f5ef6aa04cd706c816","src/signal/windows/stub.rs":"e080a121465461e6de2b7756956975cc755820bb7c54248b903d7d18aa79e255","src/signal/windows/sys.rs":"8a7178e52c33689420e54bbfc2db78f1b15e3c7f16632594d6d668ca1d865fb3","src/sync/barrier.rs":"42e44929886752bf48c0da7436735e1f777638740e1bac716170938cb4d75f89","src/sync/batch_semaphore.rs":"a7b067fa77cb5aa20ed5088560fe4ab19f2822cc865426e1fedaf8cc7dd46169","src/sync/broadcast.rs":"e6b72353606e836386324e53d1db8139c3b452568bb66f8033ff03517fb1e85b","src/sync/mod.rs":"339338d51c28f55b70c2050d0dfb527d5111fa321c7b0be065bff41150cda215","src/sync/mpsc/block.rs":"c8ee862dabbecaa67b848abf62e97a5c445ab99e4fa7ec0a8c024e3c7bec0bf8","src/sync/mpsc/bounded.rs":"69e63335f55717a1e396f39893c74e5404f97a852ecceb5e76ca98613664bacb","src/sync/mpsc/chan.rs":"2c3564e8217fcbaf6d69e0217e71f8196d8617a158c7c8d9aa8195a1ae50d242","src/sync/mpsc/error.rs":"a2968482264bc4b75cac19774bc33c7bfea5e96eff171d167bab407f4ace8c52","src/sync/mpsc/list.rs":"45b165d0965695248d63bcadc17e571bc81dc2e9d7448e86517d5a9a818401ed","src/sync/mpsc/mod.rs":"abe1eca4a868e485016406ec59b46a7251bdf97ac0dc6c0a8d2985937b65b8a3","src/sync/mpsc/unbounded.rs":"be6639979b6b675c20886b6883584b542233ab5a612028ea8c92ff8971bfbc1f","src/sync/mutex.rs":"3885f93e3e5cc7051ee62c66c94582097cebbf693494e232726bf65aabc127b1","src/sync/notify.rs":"9a4fc8994b2e267455b8edba75f2f49cdef5c6dbfb6ff2dafb1c334bcbd33275","src/sync/once_cell.rs":"935cdcb05ac86fe10731c6946f3c89b52851da61e42297fc6a98eb1418eb6a53","src/sync/oneshot.rs":"0a3f
b86a1953519a37b953c50a04e9a6c6848d4165a7d08fba158fc055fd3bac","src/sync/rwlock.rs":"eac7ae4ee97bb4962bcbd8f56a1256933be30db6db1394e97d30d866526dd3d5","src/sync/rwlock/owned_read_guard.rs":"6f1f39e8a84188cd3a57683c46da492257c4752d9204a158d04cab7acb26ff69","src/sync/rwlock/owned_write_guard.rs":"ffee768eb69cdddb35e51fc6b9547762e9131b46c236e44d87de86ddd7d65f8b","src/sync/rwlock/owned_write_guard_mapped.rs":"ae79b73500cbdbc3f8acd9a456bb292fa97cf0c3d509434895c906d84c0ce101","src/sync/rwlock/read_guard.rs":"edbeb6fd9fa848ff8e4749c5e3a4dfcdb8c82842ca8c91edb86c139af854ea79","src/sync/rwlock/write_guard.rs":"d2c7fbc1b32ee6de2f4aecf546034a620c4427fe244a9fb4e8e506e4ef9f5a43","src/sync/rwlock/write_guard_mapped.rs":"cb632f2653efadb3717d1cac0112891f676218431155753640f775705e33bd53","src/sync/semaphore.rs":"477b93fdd093a1e76954e97d3681074b28a1f1fe2a5dec751ad35a57960e4e15","src/sync/task/atomic_waker.rs":"41e9e05522254afbacec9895390d6f95498e413a61d8f654f9c421c808e7f83f","src/sync/task/mod.rs":"f5e38105c7f8a942c0e49b973bad0a8c2a1df81deea19f3c5228edc4896c1725","src/sync/tests/atomic_waker.rs":"388254c09ba995b46918980a61d440a1eb3f81db3c8abec3e1843301ac543547","src/sync/tests/loom_atomic_waker.rs":"984b52699c47383b9b62e6c4ff93fd458bbe64cb4ca836463adbaf94d27c38ee","src/sync/tests/loom_broadcast.rs":"b2c6f138707fc389ee7d91109bc38093af9060b3465e68d3543cb652e0070406","src/sync/tests/loom_list.rs":"f0ce15a0f965fe558a21bca24863c712156eaeb10feb8ef91031a6d6e3cc5dba","src/sync/tests/loom_mpsc.rs":"4883352b9d75a81c878609613545ae14910eca4f2e7f3718053dfdb792aa0760","src/sync/tests/loom_notify.rs":"c571a81799e617222f1d07e2b9b0ae9b21ead982aea90205757b6038859abd04","src/sync/tests/loom_oneshot.rs":"c3596c15692b16e7cb8cd6957362adb3a98b3d7f16c4a4262f19a3a27f262b03","src/sync/tests/loom_rwlock.rs":"80ec00bdcac838806d4d9f711cb154e02f22913ba68711855c39ca92028d3e4e","src/sync/tests/loom_semaphore_batch.rs":"c6f69b8d5b2e6842287ed34638a9045095d9f94c86ba6bb84c1224bbe10026ff","src/sync/tests/loom_watch.rs":"558ad9bab3f18b79a0d46548aff24288f1c6c5d0581e51559cc2a82cccd0696c","src/sync/tests/mod.rs":"1ef2026ac3dfbb70a437b8be441deca4b7b5e72638b71daf753b89808cd3c9ee","src/sync/tests/notify.rs":"d43bac90fa8a5dae3fad76d33f622e55767205fef2b4e8667f0be84d838a9146","src/sync/tests/semaphore_batch.rs":"0046207b0711f4d6cca228a3abb6523fcb5a1e6642255db4d469cb5246c86f7a","src/sync/watch.rs":"9ba65b76daa2b44068460290a746a370e871837219c27121e46c5c20c1571cc1","src/task/blocking.rs":"34ed9c27dac697ea9911cf5818be7589b3fa0dc9e2594ee1e3a06ea16c34f41c","src/task/builder.rs":"b4aeb63386d0893a919c2f297c440366c1ee23727df8ecbe5eb3316c312656eb","src/task/consume_budget.rs":"86a968d3bdc30edfef350ac4d6426b7bd9e3cc2f8bc4b9c3d0f2db7909456cd3","src/task/join_set.rs":"79a09bac8b7224045e7ff9379b1c8526b043343a88e6b9a8320fc2775348a45d","src/task/local.rs":"7ad8c9ac30afdda913eec28e981ea923f7cdc3bec70beca3b982c6a13084192a","src/task/mod.rs":"d8c116453acdf57138152143c3a2500384acbf7d7eb3027d4d6a6be7be3046e4","src/task/spawn.rs":"f3a7d95a013ba38a9e216fc31c94038c4d7e04183d398c1bd9119208c133829f","src/task/task_local.rs":"5ea65d92a4766760807b1729b61e92fa34d280deebebc19fe486dcc18341d03e","src/task/unconstrained.rs":"a39131550205cbc0b0ad5e4cdc1f3326ab243774a31fc986cc4ab0f27f356459","src/task/yield_now.rs":"2734c883ebd307ffb6e1b43c336f3537be96074886de3d362b1b7a3e1cc67120","src/time/clock.rs":"5a653de458a61c1c6343167b5fb30dafdb5fcf29d1103869a03cbc228d6b15c2","src/time/driver/entry.rs":"bd073d1392fce2f128206957c1b09fcf571d95e104bdc39423d5267d4362d492","src/time/driver/handle.rs":"655
393617dc953ffe08e34e7662b20c64339f4cf96092ac763beff1d8b06f4aa","src/time/driver/mod.rs":"a8f2dfb5aa31df491310fcf213715b782f5bfd14034565e345fb5f2bfc7c70dd","src/time/driver/sleep.rs":"6513f9db9b1347206069b77377443aa9587e0a875977aa742115cd0643534562","src/time/driver/tests/mod.rs":"bc6b7a8d7ea5002f82e77bb1d848ae390c56e293e6ae271e8eee59ec6a8d1acf","src/time/driver/wheel/level.rs":"6319e3537ab9470ff3404a3043ce4404b478728a4b48f884dc9db11c34ce4867","src/time/driver/wheel/mod.rs":"138edd95c6a095d2edb53754bbc40bc719ae3a8bb10b4d4fa2daf67c27f6a508","src/time/driver/wheel/stack.rs":"3adb5316b814656d6b271c85b99ecf2f23431e966ab4cfff26ba999ebdd8915e","src/time/error.rs":"2ca1e032254ee67087aa15255d9434067c65ef84d143cbd01280f037bea8c19c","src/time/instant.rs":"056bbebc2f9e3d8572c0a1ab48610613415b34db4b2e6494787d14834d4f06ce","src/time/interval.rs":"88fcdbba56510f021c006d1718aef99b40ff0b38f000db9e38e6f77812a741ae","src/time/mod.rs":"e1602cbfa9a93479b580fb1e205751e0551a0233140390dcfe5177b2cca252ee","src/time/tests/mod.rs":"d5a5971423d84c60593baa494e71bec40655266afcd532545d70a680fc014cbc","src/time/tests/test_sleep.rs":"78c97ec7816ef02b89749b0c0384e2bb2f34a3e5f5daa1480729fee91e69bec7","src/time/timeout.rs":"43adefcd4bbd41cd261996007e5667747e4ab0486aa62bc7cd9c78e44e797712","src/util/atomic_cell.rs":"6e156b730f69c698cece358ff5b206e8dd88083230fd49b2fc4a66a539b92553","src/util/bit.rs":"ad3117c9a8b1e312114a72e9b315c37137af61da51ea458832d46d2c1b88879b","src/util/error.rs":"d13d6f901192c38f4f3656ff0f2fe81b82d37ffc2a10644ea9ac4c2b0d2119b2","src/util/idle_notified_set.rs":"9c6c9bff4a2c66c83bd9a70800a29772aff688286183a36aebb1da2303e65d0a","src/util/linked_list.rs":"824ae1872cc389bbf95ad746d46229051ddf90db320751e3531c0fbe4029a2e5","src/util/mod.rs":"3462a9f130045d2fda886efaaf21e664b1e8e31111f6cdfc1d4cd72d761bee05","src/util/pad.rs":"5dc99dbb3d3d16fecd6228fd2e2f67e5301b2d426e6149f79f93c1af1b4d1d90","src/util/rand.rs":"85d46431198a8c07d08c969e3556245f66ab7062552e3717d100c6d6769408f8","src/util/slab.rs":"218b9ab87aed16bc3ed0d71211dfedcd23c19c58771505331dd409440b33de63","src/util/sync_wrapper.rs":"8f1ab76280171c33c0bafaec9c1cb1d48cfb0d19a1ab2b768cdf7d7e40f07d00","src/util/trace.rs":"3b4e7ed40a16f9806a40f56268ec5daa519565c899216ab08a64c2f152f85c84","src/util/try_lock.rs":"c4ee49e1751ee0a7df1a8cbd4f8d36ea1d7355e3ac584fdb8697a94cd7a7a8f8","src/util/vec_deque_cell.rs":"ff7d49cc4f660f55cb6ece97d1660bc5c36dee1eeb42643187ded07a33a0a6f0","src/util/wake.rs":"27046cb8116d4aef4b9296024d1a9b822121c0813b8390acca45878c9a13c85e","src/util/wake_list.rs":"c3443e695fd0b5c41c06d66cab96549064e9b645043559d01f82f365dcc4ff6a","tests/_require_full.rs":"d99eb68c2efd1e77f66689f805d89bb25f74e7aa589444ef7da71ba87dd1595a","tests/async_send_sync.rs":"de9c6aaab8a806f3a334489bde421c3dfc1d7186abd8545d0affee3161c8e5ab","tests/buffered.rs":"3ca857823d8073fecd4175dcb886558a3c6f3aec81803a90c5f786fc3edb4a96","tests/fs.rs":"b4902aaff2c28ef4d2676462381b04559fb4f7cdc0ecf46c46bccbb6276feb5d","tests/fs_copy.rs":"83448b19bdc332ec315024d4903b0a2ae81221895725a8b750025b47a43b0e79","tests/fs_dir.rs":"d9a837aacc072620c621e7e9799261b44873feea51cc6d25770bd3ff93abac00","tests/fs_file.rs":"d5ce8931976a6909eb656f90a3f9721a9788fa1e4965d7400bf196372925d1cc","tests/fs_link.rs":"01689c5c69aaa33543399164e2036ed96ae6e09b006c8dbe8af59df8c63df47e","tests/io_async_fd.rs":"ed56435564501e1cadd284f180dddfbd747df72e74c4b7b7a77bc166383bab04","tests/io_async_read.rs":"a590efe9bb01986f067118640a3b55f6760186e554f8e8f2404231d4e96f61b9","tests/io_buf_reader.rs":"f5a322dea6fe9f40c18a085a865919c1b
bfe8653203b37d1e18b77d259c6211d","tests/io_buf_writer.rs":"4327940f81c0591e5c7b4a2583af1d42169c9427bcdeb88327d9a351f02d43fb","tests/io_chain.rs":"f5d3ddc9f6e8152ceb08b5dda2ca3168b174f1f67ff28a4c5983bcbad69d8af6","tests/io_copy.rs":"0683dee400710c1696a6634ecee64c39e7027344e66bfdd6b2a78de8ca913555","tests/io_copy_bidirectional.rs":"8ec51c37395f273de8ace88e7203ce78af2c4acf00b33fd968239a644d28a8cc","tests/io_driver.rs":"76db3259c69f66ac07e5b8bcdb1e2e46d42e06e202831d8ccbee835b2dfc9714","tests/io_driver_drop.rs":"31e7002a20ab65b6a91904b4784047bdf17b8cfa134edef04b394430a43573eb","tests/io_fill_buf.rs":"a520725811d266e8833dbfb62999d4a32ad8aefc6403e12a51f8b0fd62530436","tests/io_lines.rs":"f5b1599ffff44819e269519ff0a08635ea1c5d7c541293e63ee33d98f25f0e3b","tests/io_mem_stream.rs":"24be85e8d95a896f0398459d140df1de24acbb8e70132d32893bf7fc0c24f1ed","tests/io_poll_aio.rs":"165f80ebc81e8ccb4d335c9b9a89d960f097de9b17e92bc964effa3c76ce5f98","tests/io_read.rs":"beb3ec157a40b8c986e4d81a241e86898b5048010e6e799de42c4acbfd6c4356","tests/io_read_buf.rs":"3c1a7820fc5e486fe34c6fb61b3e1bf8d18587790a4897a732f325bdd438721d","tests/io_read_exact.rs":"b6387dbeb0baceb7a1f74a9a3a8b4a654894465368be27c3bbf4352b79fc4314","tests/io_read_line.rs":"8296624b4f5e162c79024f3beab2f561f4195a244cfd4c53e4d06282f56a31bf","tests/io_read_to_end.rs":"b5478431bf61dd66cedeb8e0ef588ed8ecd6d9e0feea3655512019b4abe9d451","tests/io_read_to_string.rs":"c9ebfee5cb262d822119c2881ea1cc0c73598b13c517c297663e35bb120a089d","tests/io_read_until.rs":"b6c0df9e4852766910ec68affcd92fbfbc280018b7f9c16cf5f4830f9b8389f0","tests/io_split.rs":"b1253c9f3e733e6bd1b6e7e4a1515606902992e15e57c8dd2ef28ccfcb7618d5","tests/io_take.rs":"e0c2ae20cc41a5617f1477bf597d14e149dd242c8a2f7cf4b921d67a90fb96b8","tests/io_util_empty.rs":"32dff601a78e46e12339bf1577463c7ce1070d71d78a2fb33318112a111dc120","tests/io_write.rs":"98668a8c8feae0f85714df1dfecfcd94fba4ba347bdc3d8aaa4ea8b175055c69","tests/io_write_all.rs":"e171af1ecab45a439b384c3bae7198959c3f5e2e998967dbd9296760b52951b7","tests/io_write_all_buf.rs":"2c037f07ac464eaa4e0b87e4e4968b28a0f2f1b1d1e218546c9d5dac7a75d145","tests/io_write_buf.rs":"331d3b54c7664386bb87585f39910d1fe31bfbdfa012a2dc2120e535dcdac329","tests/io_write_int.rs":"3f4b50345f7d7d558e71ac7f2a8c1c4b7b771dad09fe2e1fbf9a17d4fb93c001","tests/join_handle_panic.rs":"97169881a5fb93ba044b86d7d81ec6d358be61e7e34e03af174fccaf8e0aa572","tests/macros_join.rs":"06d1bd29a47e2bfde898ad81162ca15d70cdf3ab522486d93e3d7872a40e86cb","tests/macros_pin.rs":"37569f60c199103b167a4979b61e8cee2fd63e39273d4c94eaebfa281a6f0556","tests/macros_rename_test.rs":"8bea48972e53ae6afc29149f7de9fef0fde73c1022529d76215ea7e89a00cd2b","tests/macros_select.rs":"2e497f987ac350c9dcab2db6f00f9ac61649b95cd48b4cd599b41c7a2a765112","tests/macros_test.rs":"748b157b3be8c5bcf49ce28c7263dd8df31c59548951f8e0654e4cee8e3c36e0","tests/macros_try_join.rs":"0b69e9665759feccced89a685b0fef82f377ade03d4f95fa3e60dd29d2ece17c","tests/named_pipe.rs":"fba6c0d454a78ff938056d1573f07ebcf1844de386f7331c6750681ce16feeca","tests/net_bind_resource.rs":"3abdf9457ebc9f8262c03fa5834f1ceb6312d4a1573b6bdd4e2f584e3cf76b66","tests/net_lookup_host.rs":"436d541bff6a6775244f9989694ef4b40457c44d847fd2514e8915516a711113","tests/no_rt.rs":"eb19939398940c56f5bf27f9da66e29894f9b2957bf724828767b52eaddc1c6d","tests/process_arg0.rs":"785d801cf281230e3208512820e7d71b4d8362b8cf33fc72235da370e10983a8","tests/process_issue_2174.rs":"66c56d8dfda4e1723928da713dddea8d15774d6730398effadf0ec28f4c6f1e1","tests/process_issue_42.rs":"26043f8246b00046137551f7a9f63
8652c70f527f10b4d91e4286643120ca41d","tests/process_kill_on_drop.rs":"56d908ccead7fd23d1f73976524cbd0b0f05806d342818095f2c6803469cc3ad","tests/process_raw_handle.rs":"54498de544b61e95d75bf6e92779fbd9b805a434ffcd1ea18496a1c5cd7c7522","tests/process_smoke.rs":"3554f4a8c8b76ec0a3b79f4ae0b5900edd34255dd3adc561e8e9a77f217b1956","tests/rt_basic.rs":"c43409b5c9300c3cf8b14521ede32355124637558143dc4f25ff02dfdb7de1e5","tests/rt_common.rs":"af8ea7dae88b139c09c285e00f7fda9dc8b030bfee8c800886e03f85408b9bdb","tests/rt_handle_block_on.rs":"76b49a32abb7ed59c16977a119cb41ee7459221510eb919cdebdbde378db95f2","tests/rt_metrics.rs":"bf1d1cfef228a27448bd7582957f16c483a61b81a53bc88903f12f5f90e2d273","tests/rt_threaded.rs":"921499dd76186e70f1a88a546327398e1979c6237143f9f5090565f6289e0218","tests/signal_ctrl_c.rs":"9b53065781b37f3db5f7c67938239b0f3b0ebbc5938c14a5b730ad7ec07415d2","tests/signal_drop_recv.rs":"d1ec97213d9c6fd9fb25ea8c2b015c9e9ee1a62fe0853fc558bc8801e5a3a841","tests/signal_drop_rt.rs":"f968c1154262a4427b5aad2d9fb36d3b7d47084312d0b5527a8eb3d589381d8b","tests/signal_drop_signal.rs":"041940550863250f359630dc67ef133874d809ddaf0a6c1238cee1565a19efec","tests/signal_multi_rt.rs":"a1c50c25f4707fda7665da61b3317dd61fc32c63c61db2bbdb56065bd9c591ce","tests/signal_no_rt.rs":"99714bf488a26b6b394d93e61639c4b6807f9d756c8d5836f31111a30d42609b","tests/signal_notify_both.rs":"bf0b9def20f530d146ee865305833d8e9bee07a0515e66573d7ff30e2c631123","tests/signal_twice.rs":"bce33093eed151955d13c334d6d8a5bc5ca67cf5b37c246e435a24c15bc166a0","tests/signal_usr1.rs":"86ad07594b09d35e71011d1e12a1fa2c477bfbc4a2a36df1421b6594a0930074","tests/support/io_vec.rs":"9b3001e120138ead4a63720019c669ff00f8455a74dea2fb231633b3b58c9b09","tests/support/leaked_buffers.rs":"bb34065abcd59b3f0f756b0aede2cf8242027e41ef16516e46e204fcd181cce0","tests/support/mpsc_stream.rs":"00d48122fa2ccbf1fe0b110ce3cf22590eda54b3ddec0134b1f9376eb1169645","tests/support/signal.rs":"83531afa2e8e71cfd90cd4e1fc821490ffa824f0f9f0c9c4a027c08fed6b8712","tests/sync_barrier.rs":"37401fe3de3010c2767ce0b1105c1ce991bd9c59ddf409403faa8a4dcdf30ffd","tests/sync_broadcast.rs":"e150b56c5a7e18522f6f011ca819c95c7405c43c064c2e2b75a801974630bdde","tests/sync_errors.rs":"438469440a1910457faadf9386757cae6e4f14807aaa513c930a8716ff68bc70","tests/sync_mpsc.rs":"989798056a9031d35cf01c6dec051b2cfe21b98b2e8f9edbf315853c2b057f66","tests/sync_mutex.rs":"bd48c9a0ab60de09a2197085ba65488e4c603fe23e1743fc85b35bfa5fbae3b1","tests/sync_mutex_owned.rs":"be9a3c9d21825a33cadc53bb75fe04f3415d1864ef108643a4d5b231ff9d4f89","tests/sync_notify.rs":"2e7ae7e901f2d6abbb1e3bce201bd077063a13918e5409ddef446cc1272e398e","tests/sync_once_cell.rs":"639d55248775f2cba0709919a3b2ff4317e70f081e09444d0a67791d65dfb3fa","tests/sync_oneshot.rs":"41e6966f3741ed848e71e973e65e630d549c6f9568f02feba737fbe3dea353bd","tests/sync_rwlock.rs":"1ddc830bfb961be4b05b1af549dacfad78ed2ae84eb2d9d12f8a201bee6cf929","tests/sync_semaphore.rs":"9796ca2c67c56676b4dc01055a55765f81f00e164b5af546ce2f0134af08cd4c","tests/sync_semaphore_owned.rs":"6d0a1067fdd6350ab1778e346fc105e9d17d9f40bba72700245f9184f0f6b5a0","tests/sync_watch.rs":"e064a41675670c4d731ca90eb99e57846321f90cc9ff81b43f58bb85f0a1513f","tests/task_abort.rs":"a12920188752719d557a77c96586b1ef5917c424eb2b1237f90e017f01d51e25","tests/task_blocking.rs":"241574dfc0793008f80d64c3f6da64842fd1d46cc88964b59380e6796e0b116b","tests/task_builder.rs":"9f0212ae4d7071f2e491effca992ed8884b2ef2e3493a6d7ee3fa659cc926338","tests/task_join_set.rs":"d61898733f7802de2af71966aa8b57a3d8cb2afecb27e2f7dfc6e6865ddd473a"
,"tests/task_local.rs":"eb6fbfc6ba024885863dd5d633611f21bffdd2f96a7fbb596598dfc0c151bf9d","tests/task_local_set.rs":"dfeb90355de7ffd59aa567bbae5539f859b55d64255e6c71785118364c9da8d3","tests/tcp_accept.rs":"a17b374e8bf590633d5c3ba434ca0a8545ad96d8c01e140bb8418be968bd8cc1","tests/tcp_connect.rs":"3e457719e2ecc877090e039465d492706c9d0318f483c70fc325866ef84108f5","tests/tcp_echo.rs":"68d67368f1d9f9dffbaa9fb1a0ef71e4f3700a76aa5a2c3714c8cf7aedb0f1bc","tests/tcp_into_split.rs":"9636970f8450139462d7b88a4994ffb6c4d77c92f89750ad9a479ee70b0ed5ce","tests/tcp_into_std.rs":"9fab18181788d46e787e87fcdbbb2d1a24c1a60bbe8bcb1bb1e63cf43326c643","tests/tcp_peek.rs":"ea904d05f9684e6108a698bdbbd856c9849c1a51eb334cf5bd45ef74c8fe585c","tests/tcp_shutdown.rs":"68f3a5b9d166064f63819a53bd07bd1c0988c1c84832ac4a35ac9b1ad6f447f4","tests/tcp_socket.rs":"bbc3f467f6d2c13e975d28198d9e186f6d4a46dd893049084845ee5b4d468095","tests/tcp_split.rs":"e967b01bb90e2081e5e08e8cfd619cbaf0d1dcd08c4e77dbd5bae893698cae85","tests/tcp_stream.rs":"0b597cfb53d00e2af278b64d74b7de6a1436b36b3a9a4966de3976c52d6f37e1","tests/test_clock.rs":"d5c9bf7bb5d926e2b29b43e47b5bb051b0f761bfe44d5fef349ed442ea7b416f","tests/time_interval.rs":"cd1e459be096c33f6d2b25030326dcdbff3eca5f83ac514d236ea95ad5d12283","tests/time_pause.rs":"aadbea51adba2f437bffb3a01f444e634f629651b1bdd89134354e5ae4782ba8","tests/time_rt.rs":"2981f4a4c276b306c4d4bf5684bafbaffb0789ce7bf8d2db3556e5208bab52df","tests/time_sleep.rs":"61556bc4882dfa111bc8a2eab566046d547c2f886f58be8b16d0d9523d9c5f7f","tests/time_timeout.rs":"ba53f80e8d3be2cd8c72388ca6b2e9d444504861febcd077cae5d7a02dc59425","tests/udp.rs":"ad6dfeda7a4721b7bd00692a05c9d668013c7f5e86714e6ed1005b99eacabaab","tests/uds_cred.rs":"146c6e9bdbb82a268f178b03575f94a40db81ef67a740dd16ead5b9e4a447f1b","tests/uds_datagram.rs":"06964ba9c88e49296da2e27dc8ae0d2bc85a55875a31f3875cfcf11f55ae6851","tests/uds_split.rs":"79d54d6ce35e5d15138299091871ecbdb6492ae6863fe406021fd7359f1ed7fd","tests/uds_stream.rs":"287b7d5e297df3e326488d98f9645ebfcf8be0205ba6a8e5ddaadac58240e113","tests/unwindsafe.rs":"dab3dd646625a878ce979cb54d1a68672444d105af3337b06b5dbb169283521b"},"package":"c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439"}
\ No newline at end of file
diff --git a/third_party/rust_crates/vendor/tokio/CHANGELOG.md b/third_party/rust_crates/vendor/tokio/CHANGELOG.md
index 3f69f09..f158a94 100644
--- a/third_party/rust_crates/vendor/tokio/CHANGELOG.md
+++ b/third_party/rust_crates/vendor/tokio/CHANGELOG.md
@@ -1,3 +1,162 @@
+# 1.19.2 (June 6, 2022)
+
+This release fixes another bug in `Notified::enable`. ([#4751])
+
+[#4751]: https://github.com/tokio-rs/tokio/pull/4751
+
+# 1.19.1 (June 5, 2022)
+
+This release fixes a bug in `Notified::enable`. ([#4747])
+
+[#4747]: https://github.com/tokio-rs/tokio/pull/4747
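Both patch fixes above concern `Notified::enable`, which lands in 1.19.0 below (#4705). For reference, a minimal sketch of the wait-loop pattern `enable` exists for, adapted from the pattern the tokio docs describe; the `Channel` type and its fields are illustrative, not part of this diff:

```rust
use std::collections::VecDeque;
use std::sync::Mutex;
use tokio::sync::Notify;

struct Channel {
    messages: Mutex<VecDeque<i32>>,
    notify: Notify,
}

impl Channel {
    fn try_recv(&self) -> Option<i32> {
        self.messages.lock().unwrap().pop_front()
    }

    async fn recv(&self) -> i32 {
        let notified = self.notify.notified();
        tokio::pin!(notified);
        loop {
            // Register interest *before* checking the queue, so a
            // notification racing with `try_recv` is not lost.
            notified.as_mut().enable();
            if let Some(msg) = self.try_recv() {
                return msg;
            }
            notified.as_mut().await;
            // Re-arm the future for the next iteration.
            notified.set(self.notify.notified());
        }
    }
}
```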
+
+# 1.19.0 (June 3, 2022)
+
+### Added
+
+- runtime: add `is_finished` method for `JoinHandle` and `AbortHandle` ([#4709])
+- runtime: make global queue and event polling intervals configurable ([#4671])
+- sync: add `Notified::enable` ([#4705])
+- sync: add `watch::Sender::send_if_modified` ([#4591])
+- sync: add `resubscribe` method to `broadcast::Receiver` ([#4607])
+- net: add `take_error` to `TcpSocket` and `TcpStream` ([#4739])
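Of the sync additions above, `send_if_modified` (#4591) benefits from a sketch: the closure mutates the value in place, and its return value decides whether receivers are woken. A hedged example; the counter payload is illustrative:

```rust
use tokio::sync::watch;

fn main() {
    let (tx, rx) = watch::channel(0u32);

    let changed = tx.send_if_modified(|v| {
        if *v < 10 {
            *v += 1;
            true // value changed: bump the version and wake receivers
        } else {
            false // unchanged: no wakeups, no new version
        }
    });

    assert!(changed);
    assert_eq!(*rx.borrow(), 1);
}
```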
+
+### Changed
+
+- io: refactor out usage of Weak in the io handle ([#4656])
+
+### Fixed
+
+- macros: avoid starvation in `join!` and `try_join!` ([#4624])
+
+### Documented
+
+- runtime: clarify semantics of tasks outliving `block_on` ([#4729])
+- time: fix example for `MissedTickBehavior::Burst` ([#4713])
+
+### Unstable
+
+- metrics: correctly update atomics in `IoDriverMetrics` ([#4725])
+- metrics: fix compilation with unstable, process, and rt, but without net ([#4682])
+- task: add `#[track_caller]` to `JoinSet`/`JoinMap` ([#4697])
+- task: add `Builder::{spawn_on, spawn_local_on, spawn_blocking_on}` ([#4683])
+- task: add `consume_budget` for cooperative scheduling ([#4498])
+- task: add `join_set::Builder` for configuring `JoinSet` tasks ([#4687])
+- task: update return value of `JoinSet::join_one` ([#4726])
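Of the unstable items, `consume_budget` (#4498) gives CPU-bound loops a yield point that only fires once the task's cooperative budget is spent. A sketch, assuming `--cfg tokio_unstable` is set as this release requires; `sum_all` is a hypothetical helper:

```rust
use tokio::task;

// Hypothetical CPU-bound helper: sums a large slice in chunks.
async fn sum_all(nums: &[u64]) -> u64 {
    let mut total = 0;
    for chunk in nums.chunks(4096) {
        total += chunk.iter().sum::<u64>();
        // A no-op while budget remains; yields once it is exhausted.
        task::consume_budget().await;
    }
    total
}
```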
+
+[#4498]: https://github.com/tokio-rs/tokio/pull/4498
+[#4591]: https://github.com/tokio-rs/tokio/pull/4591
+[#4607]: https://github.com/tokio-rs/tokio/pull/4607
+[#4624]: https://github.com/tokio-rs/tokio/pull/4624
+[#4656]: https://github.com/tokio-rs/tokio/pull/4656
+[#4671]: https://github.com/tokio-rs/tokio/pull/4671
+[#4682]: https://github.com/tokio-rs/tokio/pull/4682
+[#4683]: https://github.com/tokio-rs/tokio/pull/4683
+[#4687]: https://github.com/tokio-rs/tokio/pull/4687
+[#4697]: https://github.com/tokio-rs/tokio/pull/4697
+[#4705]: https://github.com/tokio-rs/tokio/pull/4705
+[#4709]: https://github.com/tokio-rs/tokio/pull/4709
+[#4713]: https://github.com/tokio-rs/tokio/pull/4713
+[#4725]: https://github.com/tokio-rs/tokio/pull/4725
+[#4726]: https://github.com/tokio-rs/tokio/pull/4726
+[#4729]: https://github.com/tokio-rs/tokio/pull/4729
+[#4739]: https://github.com/tokio-rs/tokio/pull/4739
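The `is_finished` addition (#4709) is a non-consuming completion check on a task handle, in contrast to `.await`, which takes ownership. A minimal sketch:

```rust
#[tokio::main]
async fn main() {
    let handle = tokio::spawn(async { 21 * 2 });

    // Polling-free check; the handle (and the task's output) survive it.
    if handle.is_finished() {
        println!("task already done");
    }

    assert_eq!(handle.await.unwrap(), 42);
}
```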
+
+# 1.18.2 (May 5, 2022)
+
+Add missing features for the `winapi` dependency. ([#4663])
+
+[#4663]: https://github.com/tokio-rs/tokio/pull/4663
+
+# 1.18.1 (May 2, 2022)
+
+The 1.18.0 release broke the build for targets without 64-bit atomics when
+building with `tokio_unstable`. This release fixes that. ([#4649])
+
+[#4649]: https://github.com/tokio-rs/tokio/pull/4649
+
+# 1.18.0 (April 27, 2022)
+
+This release adds a number of new APIs in `tokio::net`, `tokio::signal`, and
+`tokio::sync`. In addition, it adds new unstable APIs to `tokio::task` (`Id`s
+for uniquely identifying a task, and `AbortHandle` for remotely cancelling a
+task), as well as a number of bugfixes.
+
+### Fixed
+
+- blocking: add missing `#[track_caller]` for `spawn_blocking` ([#4616])
+- macros: fix `select` macro to process 64 branches ([#4519])
+- net: fix `try_io` methods not calling Mio's `try_io` internally ([#4582])
+- runtime: recover when OS fails to spawn a new thread ([#4485])
+
+### Added
+
+- net: add `UdpSocket::peer_addr` ([#4611])
+- net: add `try_read_buf` method for named pipes ([#4626])
+- signal: add `SignalKind` `Hash`/`Eq` impls and `c_int` conversion ([#4540])
+- signal: add support for signals up to `SIGRTMAX` ([#4555])
+- sync: add `watch::Sender::send_modify` method ([#4310])
+- sync: add `broadcast::Receiver::len` method ([#4542])
+- sync: add `watch::Receiver::same_channel` method ([#4581])
+- sync: implement `Clone` for `RecvError` types ([#4560])
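Among these, `broadcast::Receiver::len` (#4542) reports how many messages a receiver has yet to observe, without consuming them. A small sketch using the synchronous `try_recv` path, so no runtime is needed:

```rust
use tokio::sync::broadcast;

fn main() {
    let (tx, mut rx) = broadcast::channel(16);

    tx.send("a").unwrap();
    tx.send("b").unwrap();

    // Two messages queued for this receiver, none consumed yet.
    assert_eq!(rx.len(), 2);
    assert_eq!(rx.try_recv().unwrap(), "a");
    assert_eq!(rx.len(), 1);
}
```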
+
+### Changed
+
+- update `mio` to 0.8.1 ([#4582])
+- macros: rename `tokio::select!`'s internal `util` module ([#4543])
+- runtime: use `Vec::with_capacity` when building runtime ([#4553])
+
+### Documented
+
+- improve docs for `tokio_unstable` ([#4524])
+- runtime: include more documentation for thread_pool/worker ([#4511])
+- runtime: update `Handle::current`'s docs to mention `EnterGuard` ([#4567])
+- time: clarify platform specific timer resolution ([#4474])
+- signal: document that `Signal::recv` is cancel-safe ([#4634])
+- sync: `UnboundedReceiver` close docs ([#4548])
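The cancel-safety note for `Signal::recv` (#4634) matters mostly inside `select!`: a signal that arrives while another branch wins is queued, not dropped. A Unix-only sketch; `reload_config` is a hypothetical stand-in:

```rust
use tokio::signal::unix::{signal, SignalKind};

fn reload_config() {}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut hup = signal(SignalKind::hangup())?;
    let mut term = signal(SignalKind::terminate())?;

    loop {
        tokio::select! {
            // Cancel-safe: a SIGHUP observed while the other branch
            // completes first is seen on the next loop iteration.
            _ = hup.recv() => reload_config(),
            _ = term.recv() => break,
        }
    }
    Ok(())
}
```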
+
+### Unstable
+
+The following changes only apply when building with `--cfg tokio_unstable`:
+
+- task: add `task::Id` type ([#4630])
+- task: add `AbortHandle` type for cancelling tasks in a `JoinSet` ([#4530],
+  [#4640])
+- task: fix missing `doc(cfg(...))` attributes for `JoinSet` ([#4531])
+- task: fix broken link in `AbortHandle` RustDoc ([#4545])
+- metrics: add initial IO driver metrics ([#4507])
+
+[#4616]: https://github.com/tokio-rs/tokio/pull/4616
+[#4519]: https://github.com/tokio-rs/tokio/pull/4519
+[#4582]: https://github.com/tokio-rs/tokio/pull/4582
+[#4485]: https://github.com/tokio-rs/tokio/pull/4485
+[#4613]: https://github.com/tokio-rs/tokio/pull/4613
+[#4611]: https://github.com/tokio-rs/tokio/pull/4611
+[#4626]: https://github.com/tokio-rs/tokio/pull/4626
+[#4540]: https://github.com/tokio-rs/tokio/pull/4540
+[#4555]: https://github.com/tokio-rs/tokio/pull/4555
+[#4310]: https://github.com/tokio-rs/tokio/pull/4310
+[#4542]: https://github.com/tokio-rs/tokio/pull/4542
+[#4581]: https://github.com/tokio-rs/tokio/pull/4581
+[#4560]: https://github.com/tokio-rs/tokio/pull/4560
+[#4631]: https://github.com/tokio-rs/tokio/pull/4631
+[#4543]: https://github.com/tokio-rs/tokio/pull/4543
+[#4553]: https://github.com/tokio-rs/tokio/pull/4553
+[#4524]: https://github.com/tokio-rs/tokio/pull/4524
+[#4511]: https://github.com/tokio-rs/tokio/pull/4511
+[#4567]: https://github.com/tokio-rs/tokio/pull/4567
+[#4474]: https://github.com/tokio-rs/tokio/pull/4474
+[#4634]: https://github.com/tokio-rs/tokio/pull/4634
+[#4548]: https://github.com/tokio-rs/tokio/pull/4548
+[#4630]: https://github.com/tokio-rs/tokio/pull/4630
+[#4530]: https://github.com/tokio-rs/tokio/pull/4530
+[#4640]: https://github.com/tokio-rs/tokio/pull/4640
+[#4531]: https://github.com/tokio-rs/tokio/pull/4531
+[#4545]: https://github.com/tokio-rs/tokio/pull/4545
+[#4507]: https://github.com/tokio-rs/tokio/pull/4507
+
 # 1.17.0 (February 16, 2022)
 
 This release updates the minimum supported Rust version (MSRV) to 1.49, the
diff --git a/third_party/rust_crates/vendor/tokio/Cargo.toml b/third_party/rust_crates/vendor/tokio/Cargo.toml
index 1141c07..e42efe4 100644
--- a/third_party/rust_crates/vendor/tokio/Cargo.toml
+++ b/third_party/rust_crates/vendor/tokio/Cargo.toml
@@ -13,22 +13,46 @@
 edition = "2018"
 rust-version = "1.49"
 name = "tokio"
-version = "1.17.0"
+version = "1.19.2"
 authors = ["Tokio Contributors <team@tokio.rs>"]
-description = "An event-driven, non-blocking I/O platform for writing asynchronous I/O\nbacked applications.\n"
+description = """
+An event-driven, non-blocking I/O platform for writing asynchronous I/O
+backed applications.
+"""
 homepage = "https://tokio.rs"
 readme = "README.md"
-keywords = ["io", "async", "non-blocking", "futures"]
-categories = ["asynchronous", "network-programming"]
+keywords = [
+    "io",
+    "async",
+    "non-blocking",
+    "futures",
+]
+categories = [
+    "asynchronous",
+    "network-programming",
+]
 license = "MIT"
 repository = "https://github.com/tokio-rs/tokio"
+
 [package.metadata.docs.rs]
 all-features = true
-rustc-args = ["--cfg", "tokio_unstable"]
-rustdoc-args = ["--cfg", "docsrs", "--cfg", "tokio_unstable"]
+rustdoc-args = [
+    "--cfg",
+    "docsrs",
+    "--cfg",
+    "tokio_unstable",
+]
+rustc-args = [
+    "--cfg",
+    "tokio_unstable",
+]
 
 [package.metadata.playground]
-features = ["full", "test-util"]
+features = [
+    "full",
+    "test-util",
+]
+
 [dependencies.bytes]
 version = "1.0.0"
 optional = true
@@ -38,7 +62,7 @@
 optional = true
 
 [dependencies.mio]
-version = "0.8.0"
+version = "0.8.1"
 optional = true
 
 [dependencies.num_cpus]
@@ -64,6 +88,7 @@
 [dependencies.tokio-macros]
 version = "1.7.0"
 optional = true
+
 [dev-dependencies.async-stream]
 version = "0.3"
 
@@ -72,7 +97,7 @@
 features = ["async-await"]
 
 [dev-dependencies.mockall]
-version = "0.10.2"
+version = "0.11.1"
 
 [dev-dependencies.tempfile]
 version = "3.1.0"
@@ -86,22 +111,74 @@
 [features]
 default = []
 fs = []
-full = ["fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "sync", "time"]
+full = [
+    "fs",
+    "io-util",
+    "io-std",
+    "macros",
+    "net",
+    "parking_lot",
+    "process",
+    "rt",
+    "rt-multi-thread",
+    "signal",
+    "sync",
+    "time",
+]
 io-std = []
-io-util = ["memchr", "bytes"]
+io-util = [
+    "memchr",
+    "bytes",
+]
 macros = ["tokio-macros"]
-net = ["libc", "mio/os-poll", "mio/os-ext", "mio/net", "socket2", "winapi/namedpipeapi"]
-process = ["bytes", "once_cell", "libc", "mio/os-poll", "mio/os-ext", "mio/net", "signal-hook-registry", "winapi/threadpoollegacyapiset"]
-rt = []
-rt-multi-thread = ["num_cpus", "rt"]
-signal = ["once_cell", "libc", "mio/os-poll", "mio/net", "mio/os-ext", "signal-hook-registry", "winapi/consoleapi"]
+net = [
+    "libc",
+    "mio/os-poll",
+    "mio/os-ext",
+    "mio/net",
+    "socket2",
+    "winapi/namedpipeapi",
+]
+process = [
+    "bytes",
+    "once_cell",
+    "libc",
+    "mio/os-poll",
+    "mio/os-ext",
+    "mio/net",
+    "signal-hook-registry",
+    "winapi/threadpoollegacyapiset",
+]
+rt = ["once_cell"]
+rt-multi-thread = [
+    "num_cpus",
+    "rt",
+]
+signal = [
+    "once_cell",
+    "libc",
+    "mio/os-poll",
+    "mio/net",
+    "mio/os-ext",
+    "signal-hook-registry",
+    "winapi/consoleapi",
+]
 stats = []
 sync = []
-test-util = ["rt", "sync", "time"]
+test-util = [
+    "rt",
+    "sync",
+    "time",
+]
 time = []
+
 [target."cfg(loom)".dev-dependencies.loom]
 version = "0.5.2"
-features = ["futures", "checkpoint"]
+features = [
+    "futures",
+    "checkpoint",
+]
+
 [target."cfg(not(target_arch = \"wasm32\"))".dev-dependencies.proptest]
 version = "1"
 
@@ -110,16 +187,20 @@
 
 [target."cfg(not(target_arch = \"wasm32\"))".dev-dependencies.socket2]
 version = "0.4"
+
 [target."cfg(target_arch = \"wasm32\")".dev-dependencies.wasm-bindgen-test]
 version = "0.3.0"
+
 [target."cfg(target_os = \"freebsd\")".dev-dependencies.mio-aio]
 version = "0.6.0"
 features = ["tokio"]
+
 [target."cfg(tokio_unstable)".dependencies.tracing]
 version = "0.1.25"
 features = ["std"]
 optional = true
 default-features = false
+
 [target."cfg(unix)".dependencies.libc]
 version = "0.2.42"
 optional = true
@@ -127,14 +208,30 @@
 [target."cfg(unix)".dependencies.signal-hook-registry]
 version = "1.1.1"
 optional = true
+
 [target."cfg(unix)".dev-dependencies.libc]
 version = "0.2.42"
 
 [target."cfg(unix)".dev-dependencies.nix]
-version = "0.23"
+version = "0.24"
+features = [
+    "fs",
+    "socket",
+]
+default-features = false
+
 [target."cfg(windows)".dependencies.winapi]
 version = "0.3.8"
+features = [
+    "std",
+    "winsock2",
+    "mswsock",
+    "handleapi",
+    "ws2ipdef",
+    "ws2tcpip",
+]
 optional = true
 default-features = false
+
 [target."cfg(windows)".dev-dependencies.ntapi]
 version = "0.3.6"
diff --git a/third_party/rust_crates/vendor/tokio/README.md b/third_party/rust_crates/vendor/tokio/README.md
index 1cce34a..307369e 100644
--- a/third_party/rust_crates/vendor/tokio/README.md
+++ b/third_party/rust_crates/vendor/tokio/README.md
@@ -56,7 +56,7 @@
 
 ```toml
 [dependencies]
-tokio = { version = "1.17.0", features = ["full"] }
+tokio = { version = "1.19.2", features = ["full"] }
 ```
 Then, on your main.rs:
 
@@ -163,6 +163,18 @@
 
 ## Supported Rust Versions
 
+<!--
+When updating this, also update:
+- .github/workflows/ci.yml
+- CONTRIBUTING.md
+- README.md
+- tokio/README.md
+- tokio/Cargo.toml
+- tokio-util/Cargo.toml
+- tokio-test/Cargo.toml
+- tokio-stream/Cargo.toml
+-->
+
 Tokio will keep a rolling MSRV (minimum supported Rust version) policy of **at
 least** 6 months. When increasing the MSRV, the new Rust version must have been
 released at least six months ago. The current MSRV is 1.49.0.
@@ -180,18 +192,18 @@
 released as a new patch release for each LTS minor version. Our current LTS
 releases are:
 
- * `1.8.x` - LTS release until February 2022.
  * `1.14.x` - LTS release until June 2022.
+ * `1.18.x` - LTS release until January 2023.
 
 Each LTS release will continue to receive backported fixes for at least half a
 year. If you wish to use a fixed minor release in your project, we recommend
 that you use an LTS release.
 
 To use a fixed minor version, you can specify the version with a tilde. For
-example, to specify that you wish to use the newest `1.8.x` patch release, you
+example, to specify that you wish to use the newest `1.14.x` patch release, you
 can use the following dependency specification:
 ```text
-tokio = { version = "~1.8", features = [...] }
+tokio = { version = "~1.14", features = [...] }
 ```
 
 ## License
diff --git a/third_party/rust_crates/vendor/tokio/src/future/poll_fn.rs b/third_party/rust_crates/vendor/tokio/src/future/poll_fn.rs
index d82ce89..041e3d7 100644
--- a/third_party/rust_crates/vendor/tokio/src/future/poll_fn.rs
+++ b/third_party/rust_crates/vendor/tokio/src/future/poll_fn.rs
@@ -35,6 +35,6 @@
     type Output = T;
 
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
-        (&mut self.f)(cx)
+        (self.f)(cx)
     }
 }
diff --git a/third_party/rust_crates/vendor/tokio/src/io/driver/metrics.rs b/third_party/rust_crates/vendor/tokio/src/io/driver/metrics.rs
new file mode 100644
index 0000000..ec341ef
--- /dev/null
+++ b/third_party/rust_crates/vendor/tokio/src/io/driver/metrics.rs
@@ -0,0 +1,24 @@
+//! This file contains mocks of the metrics types used in the I/O driver.
+//!
+//! The reason these mocks don't live in `src/runtime/mock.rs` is that
+//! they need to be available when `net` is enabled but `rt` is not.
+
+cfg_not_rt_and_metrics_and_net! {
+    #[derive(Default)]
+    pub(crate) struct IoDriverMetrics {}
+
+    impl IoDriverMetrics {
+        pub(crate) fn incr_fd_count(&self) {}
+        pub(crate) fn dec_fd_count(&self) {}
+        pub(crate) fn incr_ready_count_by(&self, _amt: u64) {}
+    }
+}
+
+cfg_net! {
+    cfg_rt! {
+        cfg_metrics! {
+            pub(crate) use crate::runtime::IoDriverMetrics;
+        }
+    }
+}
diff --git a/third_party/rust_crates/vendor/tokio/src/io/driver/mod.rs b/third_party/rust_crates/vendor/tokio/src/io/driver/mod.rs
index 19f67a2..66bc318 100644
--- a/third_party/rust_crates/vendor/tokio/src/io/driver/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/io/driver/mod.rs
@@ -14,13 +14,17 @@
 mod scheduled_io;
 use scheduled_io::ScheduledIo;
 
+mod metrics;
+
 use crate::park::{Park, Unpark};
 use crate::util::slab::{self, Slab};
-use crate::{loom::sync::Mutex, util::bit};
+use crate::{loom::sync::RwLock, util::bit};
+
+use metrics::IoDriverMetrics;
 
 use std::fmt;
 use std::io;
-use std::sync::{Arc, Weak};
+use std::sync::Arc;
 use std::time::Duration;
 
 /// I/O driver, backed by Mio.
@@ -33,10 +37,8 @@
     events: Option<mio::Events>,
 
     /// Primary slab handle containing the state for each resource registered
-    /// with this driver. During Drop this is moved into the Inner structure, so
-    /// this is an Option to allow it to be vacated (until Drop this is always
-    /// Some).
-    resources: Option<Slab<ScheduledIo>>,
+    /// with this driver.
+    resources: Slab<ScheduledIo>,
 
     /// The system event queue.
     poll: mio::Poll,
@@ -48,7 +50,7 @@
 /// A reference to an I/O driver.
 #[derive(Clone)]
 pub(crate) struct Handle {
-    inner: Weak<Inner>,
+    pub(super) inner: Arc<Inner>,
 }
 
 #[derive(Debug)]
@@ -57,23 +59,22 @@
     pub(crate) ready: Ready,
 }
 
-pub(super) struct Inner {
-    /// Primary slab handle containing the state for each resource registered
-    /// with this driver.
-    ///
-    /// The ownership of this slab is moved into this structure during
-    /// `Driver::drop`, so that `Inner::drop` can notify all outstanding handles
-    /// without risking new ones being registered in the meantime.
-    resources: Mutex<Option<Slab<ScheduledIo>>>,
+struct IoDispatcher {
+    allocator: slab::Allocator<ScheduledIo>,
+    is_shutdown: bool,
+}
 
+pub(super) struct Inner {
     /// Registers I/O resources.
     registry: mio::Registry,
 
     /// Allocates `ScheduledIo` handles when creating new resources.
-    pub(super) io_dispatch: slab::Allocator<ScheduledIo>,
+    io_dispatch: RwLock<IoDispatcher>,
 
     /// Used to wake up the reactor from a call to `turn`.
     waker: mio::Waker,
+
+    metrics: IoDriverMetrics,
 }
 
 #[derive(Debug, Eq, PartialEq, Clone, Copy)]
@@ -124,12 +125,12 @@
             tick: 0,
             events: Some(mio::Events::with_capacity(1024)),
             poll,
-            resources: Some(slab),
+            resources: slab,
             inner: Arc::new(Inner {
-                resources: Mutex::new(None),
                 registry,
-                io_dispatch: allocator,
+                io_dispatch: RwLock::new(IoDispatcher::new(allocator)),
                 waker,
+                metrics: IoDriverMetrics::default(),
             }),
         })
     }
@@ -142,7 +143,7 @@
     /// to bind them to this event loop.
     pub(crate) fn handle(&self) -> Handle {
         Handle {
-            inner: Arc::downgrade(&self.inner),
+            inner: Arc::clone(&self.inner),
         }
     }
 
@@ -153,7 +154,7 @@
         self.tick = self.tick.wrapping_add(1);
 
         if self.tick == COMPACT_INTERVAL {
-            self.resources.as_mut().unwrap().compact()
+            self.resources.compact()
         }
 
         let mut events = self.events.take().expect("i/o driver event store missing");
@@ -167,14 +168,18 @@
         }
 
         // Process all the events that came in, dispatching appropriately
+        let mut ready_count = 0;
         for event in events.iter() {
             let token = event.token();
 
             if token != TOKEN_WAKEUP {
                 self.dispatch(token, Ready::from_mio(event));
+                ready_count += 1;
             }
         }
 
+        self.inner.metrics.incr_ready_count_by(ready_count);
+
         self.events = Some(events);
 
         Ok(())
@@ -183,7 +188,7 @@
     fn dispatch(&mut self, token: mio::Token, ready: Ready) {
         let addr = slab::Address::from_usize(ADDRESS.unpack(token.0));
 
-        let resources = self.resources.as_mut().unwrap();
+        let resources = &mut self.resources;
 
         let io = match resources.get(addr) {
             Some(io) => io,
@@ -203,22 +208,7 @@
 
 impl Drop for Driver {
     fn drop(&mut self) {
-        (*self.inner.resources.lock()) = self.resources.take();
-    }
-}
-
-impl Drop for Inner {
-    fn drop(&mut self) {
-        let resources = self.resources.lock().take();
-
-        if let Some(mut slab) = resources {
-            slab.for_each(|io| {
-                // If a task is waiting on the I/O resource, notify it. The task
-                // will then attempt to use the I/O resource and fail due to the
-                // driver being shutdown.
-                io.shutdown();
-            });
-        }
+        self.shutdown();
     }
 }
 
@@ -240,7 +230,16 @@
         Ok(())
     }
 
-    fn shutdown(&mut self) {}
+    fn shutdown(&mut self) {
+        if self.inner.shutdown() {
+            self.resources.for_each(|io| {
+                // If a task is waiting on the I/O resource, notify it. The task
+                // will then attempt to use the I/O resource and fail due to the
+                // driver being shutdown. And shutdown will clear all wakers.
+                io.shutdown();
+            });
+        }
+    }
 }
 
 impl fmt::Debug for Driver {
@@ -279,6 +278,16 @@
     }
 }
 
+cfg_net! {
+    cfg_metrics! {
+        impl Handle {
+            pub(crate) fn metrics(&self) -> &IoDriverMetrics {
+                &self.inner.metrics
+            }
+        }
+    }
+}
+
 impl Handle {
     /// Forces a reactor blocked in a call to `turn` to wakeup, or otherwise
     /// makes the next call to `turn` return immediately.
@@ -290,13 +299,7 @@
     /// blocked in `turn`, then the next call to `turn` will not block and
     /// return immediately.
     fn wakeup(&self) {
-        if let Some(inner) = self.inner() {
-            inner.waker.wake().expect("failed to wake I/O driver");
-        }
-    }
-
-    pub(super) fn inner(&self) -> Option<Arc<Inner>> {
-        self.inner.upgrade()
+        self.inner.waker.wake().expect("failed to wake I/O driver");
     }
 }
 
@@ -312,6 +315,17 @@
     }
 }
 
+// ===== impl IoDispatcher =====
+
+impl IoDispatcher {
+    fn new(allocator: slab::Allocator<ScheduledIo>) -> Self {
+        Self {
+            allocator,
+            is_shutdown: false,
+        }
+    }
+}
+
 // ===== impl Inner =====
 
 impl Inner {
@@ -323,24 +337,55 @@
         source: &mut impl mio::event::Source,
         interest: Interest,
     ) -> io::Result<slab::Ref<ScheduledIo>> {
-        let (address, shared) = self.io_dispatch.allocate().ok_or_else(|| {
-            io::Error::new(
-                io::ErrorKind::Other,
-                "reactor at max registered I/O resources",
-            )
-        })?;
+        let (address, shared) = self.allocate()?;
 
         let token = GENERATION.pack(shared.generation(), ADDRESS.pack(address.as_usize(), 0));
 
         self.registry
             .register(source, mio::Token(token), interest.to_mio())?;
 
+        self.metrics.incr_fd_count();
+
         Ok(shared)
     }
 
     /// Deregisters an I/O resource from the reactor.
     pub(super) fn deregister_source(&self, source: &mut impl mio::event::Source) -> io::Result<()> {
-        self.registry.deregister(source)
+        self.registry.deregister(source)?;
+
+        self.metrics.dec_fd_count();
+
+        Ok(())
+    }
+
+    /// Shuts down the dispatcher; returns `true` only on the first call.
+    fn shutdown(&self) -> bool {
+        let mut io = self.io_dispatch.write().unwrap();
+        if io.is_shutdown {
+            return false;
+        }
+        io.is_shutdown = true;
+        true
+    }
+
+    fn is_shutdown(&self) -> bool {
+        self.io_dispatch.read().unwrap().is_shutdown
+    }
+
+    fn allocate(&self) -> io::Result<(slab::Address, slab::Ref<ScheduledIo>)> {
+        let io = self.io_dispatch.read().unwrap();
+        if io.is_shutdown {
+            return Err(io::Error::new(
+                io::ErrorKind::Other,
+                "failed to find event loop",
+            ));
+        }
+        io.allocator.allocate().ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::Other,
+                "reactor at max registered I/O resources",
+            )
+        })
     }
 }
 
diff --git a/third_party/rust_crates/vendor/tokio/src/io/driver/registration.rs b/third_party/rust_crates/vendor/tokio/src/io/driver/registration.rs
index 7350be6..c939365 100644
--- a/third_party/rust_crates/vendor/tokio/src/io/driver/registration.rs
+++ b/third_party/rust_crates/vendor/tokio/src/io/driver/registration.rs
@@ -72,14 +72,7 @@
         interest: Interest,
         handle: Handle,
     ) -> io::Result<Registration> {
-        let shared = if let Some(inner) = handle.inner() {
-            inner.add_source(io, interest)?
-        } else {
-            return Err(io::Error::new(
-                io::ErrorKind::Other,
-                "failed to find event loop",
-            ));
-        };
+        let shared = handle.inner.add_source(io, interest)?;
 
         Ok(Registration { handle, shared })
     }
@@ -101,11 +94,7 @@
     ///
     /// `Err` is returned if an error is encountered.
     pub(crate) fn deregister(&mut self, io: &mut impl Source) -> io::Result<()> {
-        let inner = match self.handle.inner() {
-            Some(inner) => inner,
-            None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")),
-        };
-        inner.deregister_source(io)
+        self.handle.inner.deregister_source(io)
     }
 
     pub(crate) fn clear_readiness(&self, event: ReadyEvent) {
@@ -157,7 +146,7 @@
         let coop = ready!(crate::coop::poll_proceed(cx));
         let ev = ready!(self.shared.poll_readiness(cx, direction));
 
-        if self.handle.inner().is_none() {
+        if self.handle.inner.is_shutdown() {
             return Poll::Ready(Err(gone()));
         }
 
@@ -235,7 +224,7 @@
             pin!(fut);
 
             crate::future::poll_fn(|cx| {
-                if self.handle.inner().is_none() {
+                if self.handle.inner.is_shutdown() {
                     return Poll::Ready(Err(io::Error::new(
                         io::ErrorKind::Other,
                         crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR
diff --git a/third_party/rust_crates/vendor/tokio/src/io/stdio_common.rs b/third_party/rust_crates/vendor/tokio/src/io/stdio_common.rs
index 7e4a198..2715ba7 100644
--- a/third_party/rust_crates/vendor/tokio/src/io/stdio_common.rs
+++ b/third_party/rust_crates/vendor/tokio/src/io/stdio_common.rs
@@ -42,7 +42,7 @@
         // for further code. Since `AsyncWrite` can always shrink
         // buffer at its discretion, excessive (i.e. in tests) shrinking
         // does not break correctness.
-        // 2. If buffer is small, it will not be shrinked.
+        // 2. If buffer is small, it will not be shrunk.
         // That's why its "textness" will not change, so we don't have
         // to fix it up.
         if cfg!(not(any(target_os = "windows", test))) || buf.len() <= crate::io::blocking::MAX_BUF
@@ -193,7 +193,7 @@
     fn test_pseudo_text() {
         // In this test we write a piece of binary data, whose beginning is
         // text though. We then validate that even in this corner case buffer
-        // was not shrinked too much.
+        // was not shrunk too much.
         let checked_count = super::MAGIC_CONST * super::MAX_BYTES_PER_CHAR;
         let mut data: Vec<u8> = str::repeat("a", checked_count).into();
         data.extend(std::iter::repeat(0b1010_1010).take(MAX_BUF - checked_count + 1));
@@ -212,7 +212,7 @@
             writer.write_history.iter().copied().sum::<usize>(),
             data.len()
         );
-        // Check that at most MAX_BYTES_PER_CHAR + 1 (i.e. 5) bytes were shrinked
+        // Check that at most MAX_BYTES_PER_CHAR + 1 (i.e. 5) bytes were shrunk
         // from the buffer: one because it was outside of MAX_BUF boundary, and
         // up to one "utf8 code point".
         assert!(data.len() - writer.write_history[0] <= super::MAX_BYTES_PER_CHAR + 1);
diff --git a/third_party/rust_crates/vendor/tokio/src/io/util/vec_with_initialized.rs b/third_party/rust_crates/vendor/tokio/src/io/util/vec_with_initialized.rs
index 208cc93..a9b94e3 100644
--- a/third_party/rust_crates/vendor/tokio/src/io/util/vec_with_initialized.rs
+++ b/third_party/rust_crates/vendor/tokio/src/io/util/vec_with_initialized.rs
@@ -1,19 +1,18 @@
 use crate::io::ReadBuf;
 use std::mem::MaybeUninit;
 
-mod private {
-    pub trait Sealed {}
+/// Something that looks like a `Vec<u8>`.
+///
+/// # Safety
+///
+/// The implementor must guarantee that the vector returned by the
+/// `as_ref` and `as_mut` methods does not change from one call to
+/// the next.
+pub(crate) unsafe trait VecU8: AsRef<Vec<u8>> + AsMut<Vec<u8>> {}
 
-    impl Sealed for Vec<u8> {}
-    impl Sealed for &mut Vec<u8> {}
-}
+unsafe impl VecU8 for Vec<u8> {}
+unsafe impl VecU8 for &mut Vec<u8> {}
 
-/// A sealed trait that constrains the generic type parameter in `VecWithInitialized<V>`.  That struct's safety relies
-/// on certain invariants upheld by `Vec<u8>`.
-pub(crate) trait VecU8: AsMut<Vec<u8>> + private::Sealed {}
-
-impl VecU8 for Vec<u8> {}
-impl VecU8 for &mut Vec<u8> {}
 /// This struct wraps a `Vec<u8>` or `&mut Vec<u8>`, combining it with a
 /// `num_initialized`, which keeps track of the number of initialized bytes
 /// in the unused capacity.
@@ -64,8 +63,8 @@
     }
 
     #[cfg(feature = "io-util")]
-    pub(crate) fn is_empty(&mut self) -> bool {
-        self.vec.as_mut().is_empty()
+    pub(crate) fn is_empty(&self) -> bool {
+        self.vec.as_ref().is_empty()
     }
 
     pub(crate) fn get_read_buf<'a>(&'a mut self) -> ReadBuf<'a> {
diff --git a/third_party/rust_crates/vendor/tokio/src/lib.rs b/third_party/rust_crates/vendor/tokio/src/lib.rs
index 35295d8..c0d7e62 100644
--- a/third_party/rust_crates/vendor/tokio/src/lib.rs
+++ b/third_party/rust_crates/vendor/tokio/src/lib.rs
@@ -114,7 +114,7 @@
 //! The [`tokio::sync`] module contains synchronization primitives to use when
 //! needing to communicate or share data. These include:
 //!
-//! * channels ([`oneshot`], [`mpsc`], and [`watch`]), for sending values
+//! * channels ([`oneshot`], [`mpsc`], [`watch`], and [`broadcast`]), for sending values
 //!   between tasks,
 //! * a non-blocking [`Mutex`], for controlling access to a shared, mutable
 //!   value,
@@ -130,6 +130,7 @@
 //! [`oneshot`]: crate::sync::oneshot
 //! [`mpsc`]: crate::sync::mpsc
 //! [`watch`]: crate::sync::watch
+//! [`broadcast`]: crate::sync::broadcast
 //!
 //! The [`tokio::time`] module provides utilities for tracking time and
 //! scheduling work. This includes functions for setting [timeouts][timeout] for
@@ -340,13 +341,43 @@
 //!
 //! ### Unstable features
 //!
-//! These feature flags enable **unstable** features. The public API may break in 1.x
-//! releases. To enable these features, the `--cfg tokio_unstable` must be passed to
-//! `rustc` when compiling. This is easiest done using the `RUSTFLAGS` env variable:
-//! `RUSTFLAGS="--cfg tokio_unstable"`.
+//! Some feature flags are only available when specifying the `tokio_unstable` flag:
 //!
 //! - `tracing`: Enables tracing events.
 //!
+//! Likewise, some parts of the API are only available with the same flag:
+//!
+//! - [`task::JoinSet`]
+//! - [`task::Builder`]
+//!
+//! This flag enables **unstable** features. The public API of these features
+//! may break in 1.x releases. To enable these features, the `--cfg
+//! tokio_unstable` argument must be passed to `rustc` when compiling. This
+//! serves to explicitly opt-in to features which may break semver conventions,
+//! since Cargo [does not yet directly support such opt-ins][unstable features].
+//!
+//! You can specify it in your project's `.cargo/config.toml` file:
+//!
+//! ```toml
+//! [build]
+//! rustflags = ["--cfg", "tokio_unstable"]
+//! ```
+//!
+//! Alternatively, you can specify it with an environment variable:
+//!
+//! ```sh
+//! ## Many *nix shells:
+//! export RUSTFLAGS="--cfg tokio_unstable"
+//! cargo build
+//! ```
+//!
+//! ```powershell
+//! ## Windows PowerShell:
+//! $Env:RUSTFLAGS="--cfg tokio_unstable"
+//! cargo build
+//! ```
+//!
+//! [unstable features]: https://internals.rust-lang.org/t/feature-request-unstable-opt-in-non-transitive-crate-features/16193#why-not-a-crate-feature-2
 //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
 
 // Test that pointer width is compatible. This asserts that e.g. usize is at
@@ -483,7 +514,7 @@
 
 #[cfg(all(not(docsrs), windows, feature = "net"))]
 #[allow(unused)]
-pub(crate) use ::winapi;
+pub(crate) use winapi;
 
 cfg_macros! {
     /// Implementation detail of the `select!` macro. This macro is **not**
diff --git a/third_party/rust_crates/vendor/tokio/src/loom/std/atomic_u64.rs b/third_party/rust_crates/vendor/tokio/src/loom/std/atomic_u64.rs
index 113992d..ac20f35 100644
--- a/third_party/rust_crates/vendor/tokio/src/loom/std/atomic_u64.rs
+++ b/third_party/rust_crates/vendor/tokio/src/loom/std/atomic_u64.rs
@@ -75,4 +75,12 @@
             self.compare_exchange(current, new, success, failure)
         }
     }
+
+    impl Default for AtomicU64 {
+        fn default() -> AtomicU64 {
+            Self {
+                inner: Mutex::new(0),
+            }
+        }
+    }
 }
diff --git a/third_party/rust_crates/vendor/tokio/src/macros/cfg.rs b/third_party/rust_crates/vendor/tokio/src/macros/cfg.rs
index b6beb3d..45ae5f9 100644
--- a/third_party/rust_crates/vendor/tokio/src/macros/cfg.rs
+++ b/third_party/rust_crates/vendor/tokio/src/macros/cfg.rs
@@ -195,6 +195,22 @@
     }
 }
 
+macro_rules! cfg_not_rt_and_metrics_and_net {
+    ($($item:item)*) => {
+        $( #[cfg(not(all(feature = "net", feature = "rt", all(tokio_unstable, not(loom)))))]$item )*
+    }
+}
+
+macro_rules! cfg_net_or_process {
+    ($($item:item)*) => {
+        $(
+            #[cfg(any(feature = "net", feature = "process"))]
+            #[cfg_attr(docsrs, doc(cfg(any(feature = "net", feature = "process"))))]
+            $item
+        )*
+    }
+}
+
 macro_rules! cfg_net {
     ($($item:item)*) => {
         $(
diff --git a/third_party/rust_crates/vendor/tokio/src/macros/join.rs b/third_party/rust_crates/vendor/tokio/src/macros/join.rs
index f91b5f1..9697936 100644
--- a/third_party/rust_crates/vendor/tokio/src/macros/join.rs
+++ b/third_party/rust_crates/vendor/tokio/src/macros/join.rs
@@ -12,7 +12,7 @@
 /// for **all** branches to complete, regardless of whether any complete with `Err`. Use
 /// [`try_join!`] to return early when `Err` is encountered.
 ///
-/// [`try_join!`]: macro@try_join
+/// [`try_join!`]: crate::try_join
 ///
 /// # Notes
 ///
@@ -60,6 +60,9 @@
         // normalization is complete.
         ( $($count:tt)* )
 
+        // The expression `0+1+1+ ... +1` equals the number of branches.
+        ( $($total:tt)* )
+
         // Normalized join! branches
         $( ( $($skip:tt)* ) $e:expr, )*
 
@@ -71,22 +74,54 @@
         // the requirement of `Pin::new_unchecked` called below.
         let mut futures = ( $( maybe_done($e), )* );
 
+        // Each time the future created by poll_fn is polled, a different future will be polled first
+        // to ensure every future passed to join! gets a chance to make progress even if
+        // one of the futures consumes the whole budget.
+        //
+        // This is the number of futures that will be skipped in the first loop
+        // iteration the next time.
+        let mut skip_next_time: u32 = 0;
+
         poll_fn(move |cx| {
+            const COUNT: u32 = $($total)*;
+
             let mut is_pending = false;
 
+            let mut to_run = COUNT;
+
+            // The number of futures that will be skipped in the first loop iteration.
+            let mut skip = skip_next_time;
+
+            skip_next_time = if skip + 1 == COUNT { 0 } else { skip + 1 };
+
+            // This loop runs twice and the first `skip` futures
+            // are not polled in the first iteration.
+            loop {
             $(
-                // Extract the future for this branch from the tuple.
-                let ( $($skip,)* fut, .. ) = &mut futures;
+                if skip == 0 {
+                    if to_run == 0 {
+                        // Every future has been polled
+                        break;
+                    }
+                    to_run -= 1;
 
-                // Safety: future is stored on the stack above
-                // and never moved.
-                let mut fut = unsafe { Pin::new_unchecked(fut) };
+                    // Extract the future for this branch from the tuple.
+                    let ( $($skip,)* fut, .. ) = &mut futures;
 
-                // Try polling
-                if fut.poll(cx).is_pending() {
-                    is_pending = true;
+                    // Safety: future is stored on the stack above
+                    // and never moved.
+                    let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+                    // Try polling
+                    if fut.poll(cx).is_pending() {
+                        is_pending = true;
+                    }
+                } else {
+                    // Future skipped, one less future to skip in the next iteration
+                    skip -= 1;
                 }
             )*
+            }
 
             if is_pending {
                 Pending
@@ -107,13 +142,13 @@
 
     // ===== Normalize =====
 
-    (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
-        $crate::join!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*)
+    (@ { ( $($s:tt)* ) ( $($n:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
+        $crate::join!(@{ ($($s)* _) ($($n)* + 1) $($t)* ($($s)*) $e, } $($r)*)
     };
 
     // ===== Entry point =====
 
     ( $($e:expr),* $(,)?) => {
-        $crate::join!(@{ () } $($e,)*)
+        $crate::join!(@{ () (0) } $($e,)*)
     };
 }
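
The rotation logic added to `join!` is easier to see outside the macro. A hedged sketch in plain Rust (illustrative names; the macro itself unrolls the branches and loops over them twice rather than computing indices):

    // A minimal model of the rotation above (not the macro's actual
    // two-pass expansion): visit all `count` branches exactly once per
    // poll, starting at `skip` and wrapping around.
    fn rotated_indices(count: u32, skip: u32) -> impl Iterator<Item = u32> {
        (0..count).map(move |i| (i + skip) % count)
    }

    fn main() {
        // With three branches and skip = 1 the visit order is 1, 2, 0; the
        // next poll starts at 2, so no branch can starve the others.
        assert_eq!(rotated_indices(3, 1).collect::<Vec<_>>(), vec![1, 2, 0]);
    }
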
diff --git a/third_party/rust_crates/vendor/tokio/src/macros/select.rs b/third_party/rust_crates/vendor/tokio/src/macros/select.rs
index 051f8cb..f38aee0 100644
--- a/third_party/rust_crates/vendor/tokio/src/macros/select.rs
+++ b/third_party/rust_crates/vendor/tokio/src/macros/select.rs
@@ -101,6 +101,7 @@
 ///  * [`tokio::sync::watch::Receiver::changed`](crate::sync::watch::Receiver::changed)
 ///  * [`tokio::net::TcpListener::accept`](crate::net::TcpListener::accept)
 ///  * [`tokio::net::UnixListener::accept`](crate::net::UnixListener::accept)
+///  * [`tokio::signal::unix::Signal::recv`](crate::signal::unix::Signal::recv)
 ///  * [`tokio::io::AsyncReadExt::read`](crate::io::AsyncReadExt::read) on any `AsyncRead`
 ///  * [`tokio::io::AsyncReadExt::read_buf`](crate::io::AsyncReadExt::read_buf) on any `AsyncRead`
 ///  * [`tokio::io::AsyncWriteExt::write`](crate::io::AsyncWriteExt::write) on any `AsyncWrite`
@@ -429,7 +430,8 @@
         //
         // This module is defined within a scope and should not leak out of this
         // macro.
-        mod util {
+        #[doc(hidden)]
+        mod __tokio_select_util {
             // Generate an enum with one variant per select branch
             $crate::select_priv_declare_output_enum!( ( $($count)* ) );
         }
@@ -442,13 +444,13 @@
 
         const BRANCHES: u32 = $crate::count!( $($count)* );
 
-        let mut disabled: util::Mask = Default::default();
+        let mut disabled: __tokio_select_util::Mask = Default::default();
 
         // First, invoke all the pre-conditions. For any that return true,
         // set the appropriate bit in `disabled`.
         $(
             if !$c {
-                let mask: util::Mask = 1 << $crate::count!( $($skip)* );
+                let mask: __tokio_select_util::Mask = 1 << $crate::count!( $($skip)* );
                 disabled |= mask;
             }
         )*
@@ -525,7 +527,7 @@
                                 }
 
                                 // The select is complete, return the value
-                                return Ready($crate::select_variant!(util::Out, ($($skip)*))(out));
+                                return Ready($crate::select_variant!(__tokio_select_util::Out, ($($skip)*))(out));
                             }
                         )*
                         _ => unreachable!("reaching this means there probably is an off by one bug"),
@@ -536,16 +538,16 @@
                     Pending
                 } else {
                     // All branches have been disabled.
-                    Ready(util::Out::Disabled)
+                    Ready(__tokio_select_util::Out::Disabled)
                 }
             }).await
         };
 
         match output {
             $(
-                $crate::select_variant!(util::Out, ($($skip)*) ($bind)) => $handle,
+                $crate::select_variant!(__tokio_select_util::Out, ($($skip)*) ($bind)) => $handle,
             )*
-            util::Out::Disabled => $else,
+            __tokio_select_util::Out::Disabled => $else,
             _ => unreachable!("failed to match bind"),
         }
     }};
@@ -801,6 +803,9 @@
     (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
         63
     };
+    (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+        64
+    };
 }
 
 #[macro_export]
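
The new cancellation-safety bullet for `Signal::recv` enables the usual signal-plus-work loop. A hedged, unix-only sketch (assumes the `rt`, `macros`, `time`, and `signal` features; names are illustrative):

    use std::time::Duration;
    use tokio::signal::unix::{signal, SignalKind};

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let mut hup = signal(SignalKind::hangup())?;
        let mut ticker = tokio::time::interval(Duration::from_secs(1));
        loop {
            tokio::select! {
                // Losing this race does not drop a delivered signal,
                // because `Signal::recv` is cancellation safe.
                _ = hup.recv() => println!("got SIGHUP"),
                _ = ticker.tick() => println!("tick"),
            }
        }
    }
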
diff --git a/third_party/rust_crates/vendor/tokio/src/macros/try_join.rs b/third_party/rust_crates/vendor/tokio/src/macros/try_join.rs
index 6d3a893..c80395c 100644
--- a/third_party/rust_crates/vendor/tokio/src/macros/try_join.rs
+++ b/third_party/rust_crates/vendor/tokio/src/macros/try_join.rs
@@ -106,6 +106,9 @@
         // normalization is complete.
         ( $($count:tt)* )
 
+        // The expression `0+1+1+ ... +1`, which evaluates to the number of branches.
+        ( $($total:tt)* )
+
         // Normalized try_join! branches
         $( ( $($skip:tt)* ) $e:expr, )*
 
@@ -117,24 +120,56 @@
         // the requirement of `Pin::new_unchecked` called below.
         let mut futures = ( $( maybe_done($e), )* );
 
+        // Each time the future created by poll_fn is polled, a different future will be polled first
+        // to ensure every future passed to try_join! gets a chance to make progress even if
+        // one of the futures consumes the whole budget.
+        //
+        // This is the number of futures that will be skipped in the first loop
+        // iteration the next time this future is polled.
+        let mut skip_next_time: u32 = 0;
+
         poll_fn(move |cx| {
+            const COUNT: u32 = $($total)*;
+
             let mut is_pending = false;
 
+            let mut to_run = COUNT;
+
+            // The number of futures that will be skipped in the first loop iteration.
+            let mut skip = skip_next_time;
+
+            skip_next_time = if skip + 1 == COUNT { 0 } else { skip + 1 };
+
+            // This loop runs twice and the first `skip` futures
+            // are not polled in the first iteration.
+            loop {
             $(
-                // Extract the future for this branch from the tuple.
-                let ( $($skip,)* fut, .. ) = &mut futures;
+                if skip == 0 {
+                    if to_run == 0 {
+                        // Every future has been polled
+                        break;
+                    }
+                    to_run -= 1;
 
-                // Safety: future is stored on the stack above
-                // and never moved.
-                let mut fut = unsafe { Pin::new_unchecked(fut) };
+                    // Extract the future for this branch from the tuple.
+                    let ( $($skip,)* fut, .. ) = &mut futures;
 
-                // Try polling
-                if fut.as_mut().poll(cx).is_pending() {
-                    is_pending = true;
-                } else if fut.as_mut().output_mut().expect("expected completed future").is_err() {
-                    return Ready(Err(fut.take_output().expect("expected completed future").err().unwrap()))
+                    // Safety: future is stored on the stack above
+                    // and never moved.
+                    let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+                    // Try polling
+                    if fut.as_mut().poll(cx).is_pending() {
+                        is_pending = true;
+                    } else if fut.as_mut().output_mut().expect("expected completed future").is_err() {
+                        return Ready(Err(fut.take_output().expect("expected completed future").err().unwrap()))
+                    }
+                } else {
+                    // Future skipped, one less future to skip in the next iteration
+                    skip -= 1;
                 }
             )*
+            }
 
             if is_pending {
                 Pending
@@ -159,13 +194,13 @@
 
     // ===== Normalize =====
 
-    (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
-        $crate::try_join!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*)
+    (@ { ( $($s:tt)* ) ( $($n:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
+        $crate::try_join!(@{ ($($s)* _) ($($n)* + 1) $($t)* ($($s)*) $e, } $($r)*)
     };
 
     // ===== Entry point =====
 
     ( $($e:expr),* $(,)?) => {
-        $crate::try_join!(@{ () } $($e,)*)
+        $crate::try_join!(@{ () (0) } $($e,)*)
     };
 }
diff --git a/third_party/rust_crates/vendor/tokio/src/net/addr.rs b/third_party/rust_crates/vendor/tokio/src/net/addr.rs
index 13f743c..e592aee 100644
--- a/third_party/rust_crates/vendor/tokio/src/net/addr.rs
+++ b/third_party/rust_crates/vendor/tokio/src/net/addr.rs
@@ -136,7 +136,22 @@
     type Future = ReadyFuture<Self::Iter>;
 
     fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future {
-        let iter = self.to_vec().into_iter();
+        #[inline]
+        fn slice_to_vec(addrs: &[SocketAddr]) -> Vec<SocketAddr> {
+            addrs.to_vec()
+        }
+
+        // This uses a helper method because clippy doesn't like the `to_vec()`
+        // call here (it will allocate, whereas `self.iter().copied()` would
+        // not), but it's actually necessary in order to ensure that the
+        // returned iterator is valid for the `'static` lifetime, which the
+        // borrowed `slice::Iter` iterator would not be.
+        //
+        // Note that we can't actually add an `allow` attribute for
+        // `clippy::unnecessary_to_owned` here, as Tokio's CI runs clippy lints
+        // on Rust 1.52 to avoid breaking LTS releases of Tokio. Users of newer
+        // Rust versions who see this lint should just ignore it.
+        let iter = slice_to_vec(self).into_iter();
         future::ready(Ok(iter))
     }
 }
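
The clippy comment above turns on a lifetime requirement that is easier to state in isolation. A hedged sketch (standalone, illustrative name):

    use std::net::SocketAddr;

    // `vec::IntoIter` owns its addresses, so the returned iterator is
    // `'static`; a borrowed `slice::Iter<'_, SocketAddr>` would stay tied
    // to `addrs` and could not satisfy that bound.
    fn owned_iter(addrs: &[SocketAddr]) -> std::vec::IntoIter<SocketAddr> {
        addrs.to_vec().into_iter()
    }

    fn main() {
        let addrs: [SocketAddr; 1] = [([127, 0, 0, 1], 8080).into()];
        assert_eq!(owned_iter(&addrs).count(), 1);
    }
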
diff --git a/third_party/rust_crates/vendor/tokio/src/net/tcp/socket.rs b/third_party/rust_crates/vendor/tokio/src/net/tcp/socket.rs
index 171e240..bc93e83 100644
--- a/third_party/rust_crates/vendor/tokio/src/net/tcp/socket.rs
+++ b/third_party/rust_crates/vendor/tokio/src/net/tcp/socket.rs
@@ -424,6 +424,11 @@
         self.inner.local_addr().and_then(convert_address)
     }
 
+    /// Returns the value of the `SO_ERROR` option.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.inner.take_error()
+    }
+
     /// Binds the socket to the given address.
     ///
     /// This calls the `bind(2)` operating-system function. Behavior is
diff --git a/third_party/rust_crates/vendor/tokio/src/net/tcp/stream.rs b/third_party/rust_crates/vendor/tokio/src/net/tcp/stream.rs
index ebb67b8..204d9ca 100644
--- a/third_party/rust_crates/vendor/tokio/src/net/tcp/stream.rs
+++ b/third_party/rust_crates/vendor/tokio/src/net/tcp/stream.rs
@@ -264,6 +264,11 @@
         self.io.local_addr()
     }
 
+    /// Returns the value of the `SO_ERROR` option.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.io.take_error()
+    }
+
     /// Returns the remote address that this stream is connected to.
     ///
     /// # Examples
@@ -968,7 +973,9 @@
         interest: Interest,
         f: impl FnOnce() -> io::Result<R>,
     ) -> io::Result<R> {
-        self.io.registration().try_io(interest, f)
+        self.io
+            .registration()
+            .try_io(interest, || self.io.try_io(f))
     }
 
     /// Receives data on the socket from the remote address to which it is
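
A hedged usage sketch for the new `take_error` accessor on `TcpSocket` and `TcpStream` (illustrative helper; assumes an already-connected stream):

    use tokio::net::TcpStream;

    // Drains any queued SO_ERROR from the socket; `Ok(None)` means no
    // deferred error is pending.
    fn report_socket_error(stream: &TcpStream) -> std::io::Result<()> {
        if let Some(err) = stream.take_error()? {
            eprintln!("deferred socket error: {}", err);
        }
        Ok(())
    }
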
diff --git a/third_party/rust_crates/vendor/tokio/src/net/udp.rs b/third_party/rust_crates/vendor/tokio/src/net/udp.rs
index 12af5152..bd905e9 100644
--- a/third_party/rust_crates/vendor/tokio/src/net/udp.rs
+++ b/third_party/rust_crates/vendor/tokio/src/net/udp.rs
@@ -278,6 +278,28 @@
         self.io.local_addr()
     }
 
+    /// Returns the socket address of the remote peer this socket was connected to.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use tokio::net::UdpSocket;
+    ///
+    /// # use std::{io, net::SocketAddr};
+    /// # #[tokio::main]
+    /// # async fn main() -> io::Result<()> {
+    /// let addr = "0.0.0.0:8080".parse::<SocketAddr>().unwrap();
+    /// let peer = "127.0.0.1:11100".parse::<SocketAddr>().unwrap();
+    /// let sock = UdpSocket::bind(addr).await?;
+    /// sock.connect(peer).await?;
+    /// assert_eq!(peer, sock.peer_addr()?);
+    /// #    Ok(())
+    /// # }
+    /// ```
+    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+        self.io.peer_addr()
+    }
+
     /// Connects the UDP socket setting the default destination for send() and
     /// limiting packets that are read via recv from the address specified in
     /// `addr`.
@@ -1272,7 +1294,9 @@
         interest: Interest,
         f: impl FnOnce() -> io::Result<R>,
     ) -> io::Result<R> {
-        self.io.registration().try_io(interest, f)
+        self.io
+            .registration()
+            .try_io(interest, || self.io.try_io(f))
     }
 
     /// Receives data from the socket, without removing it from the input queue.
diff --git a/third_party/rust_crates/vendor/tokio/src/net/unix/datagram/socket.rs b/third_party/rust_crates/vendor/tokio/src/net/unix/datagram/socket.rs
index d5b6186..def006c 100644
--- a/third_party/rust_crates/vendor/tokio/src/net/unix/datagram/socket.rs
+++ b/third_party/rust_crates/vendor/tokio/src/net/unix/datagram/socket.rs
@@ -1241,7 +1241,9 @@
         interest: Interest,
         f: impl FnOnce() -> io::Result<R>,
     ) -> io::Result<R> {
-        self.io.registration().try_io(interest, f)
+        self.io
+            .registration()
+            .try_io(interest, || self.io.try_io(f))
     }
 
     /// Returns the local address that this socket is bound to.
diff --git a/third_party/rust_crates/vendor/tokio/src/net/unix/stream.rs b/third_party/rust_crates/vendor/tokio/src/net/unix/stream.rs
index 4e7ef87..fe2d825 100644
--- a/third_party/rust_crates/vendor/tokio/src/net/unix/stream.rs
+++ b/third_party/rust_crates/vendor/tokio/src/net/unix/stream.rs
@@ -685,7 +685,9 @@
         interest: Interest,
         f: impl FnOnce() -> io::Result<R>,
     ) -> io::Result<R> {
-        self.io.registration().try_io(interest, f)
+        self.io
+            .registration()
+            .try_io(interest, || self.io.try_io(f))
     }
 
     /// Creates new `UnixStream` from a `std::os::unix::net::UnixStream`.
diff --git a/third_party/rust_crates/vendor/tokio/src/net/windows/named_pipe.rs b/third_party/rust_crates/vendor/tokio/src/net/windows/named_pipe.rs
index 550fd4d..695b8eb3 100644
--- a/third_party/rust_crates/vendor/tokio/src/net/windows/named_pipe.rs
+++ b/third_party/rust_crates/vendor/tokio/src/net/windows/named_pipe.rs
@@ -12,6 +12,10 @@
 use crate::io::{AsyncRead, AsyncWrite, Interest, PollEvented, ReadBuf, Ready};
 use crate::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle};
 
+cfg_io_util! {
+    use bytes::BufMut;
+}
+
 // Hide imports which are not used when generating documentation.
 #[cfg(not(docsrs))]
 mod doc {
@@ -528,6 +532,86 @@
             .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs))
     }
 
+    cfg_io_util! {
+        /// Tries to read data from the stream into the provided buffer, advancing the
+        /// buffer's internal cursor, returning how many bytes were read.
+        ///
+        /// Receives any pending data from the socket but does not wait for new data
+        /// to arrive. On success, returns the number of bytes read. Because
+        /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by
+        /// the async task and can exist entirely on the stack.
+        ///
+        /// Usually, [`readable()`] or [`ready()`] is used with this function.
+        ///
+        /// [`readable()`]: NamedPipeServer::readable()
+        /// [`ready()`]: NamedPipeServer::ready()
+        ///
+        /// # Return
+        ///
+        /// If data is successfully read, `Ok(n)` is returned, where `n` is the
+        /// number of bytes read. `Ok(0)` indicates the stream's read half is closed
+        /// and will no longer yield data. If the stream is not ready to read data
+        /// `Err(io::ErrorKind::WouldBlock)` is returned.
+        ///
+        /// # Examples
+        ///
+        /// ```no_run
+        /// use tokio::net::windows::named_pipe;
+        /// use std::error::Error;
+        /// use std::io;
+        ///
+        /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-readable";
+        ///
+        /// #[tokio::main]
+        /// async fn main() -> Result<(), Box<dyn Error>> {
+        ///     let server = named_pipe::ServerOptions::new().create(PIPE_NAME)?;
+        ///
+        ///     loop {
+        ///         // Wait for the socket to be readable
+        ///         server.readable().await?;
+        ///
+        ///         let mut buf = Vec::with_capacity(4096);
+        ///
+        ///         // Try to read data, this may still fail with `WouldBlock`
+        ///         // if the readiness event is a false positive.
+        ///         match server.try_read_buf(&mut buf) {
+        ///             Ok(0) => break,
+        ///             Ok(n) => {
+        ///                 println!("read {} bytes", n);
+        ///             }
+        ///             Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+        ///                 continue;
+        ///             }
+        ///             Err(e) => {
+        ///                 return Err(e.into());
+        ///             }
+        ///         }
+        ///     }
+        ///
+        ///     Ok(())
+        /// }
+        /// ```
+        pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> {
+            self.io.registration().try_io(Interest::READABLE, || {
+                use std::io::Read;
+
+                let dst = buf.chunk_mut();
+                let dst =
+                    unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) };
+
+                // Safety: We trust `NamedPipeServer::read` to have filled up `n` bytes in the
+                // buffer.
+                let n = (&*self.io).read(dst)?;
+
+                unsafe {
+                    buf.advance_mut(n);
+                }
+
+                Ok(n)
+            })
+        }
+    }
+
     /// Waits for the pipe to become writable.
     ///
     /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually
@@ -1186,6 +1270,86 @@
             .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs))
     }
 
+    cfg_io_util! {
+        /// Tries to read data from the stream into the provided buffer, advancing the
+        /// buffer's internal cursor, returning how many bytes were read.
+        ///
+        /// Receives any pending data from the socket but does not wait for new data
+        /// to arrive. On success, returns the number of bytes read. Because
+        /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by
+        /// the async task and can exist entirely on the stack.
+        ///
+        /// Usually, [`readable()`] or [`ready()`] is used with this function.
+        ///
+        /// [`readable()`]: NamedPipeClient::readable()
+        /// [`ready()`]: NamedPipeClient::ready()
+        ///
+        /// # Return
+        ///
+        /// If data is successfully read, `Ok(n)` is returned, where `n` is the
+        /// number of bytes read. `Ok(0)` indicates the stream's read half is closed
+        /// and will no longer yield data. If the stream is not ready to read data
+        /// `Err(io::ErrorKind::WouldBlock)` is returned.
+        ///
+        /// # Examples
+        ///
+        /// ```no_run
+        /// use tokio::net::windows::named_pipe;
+        /// use std::error::Error;
+        /// use std::io;
+        ///
+        /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-readable";
+        ///
+        /// #[tokio::main]
+        /// async fn main() -> Result<(), Box<dyn Error>> {
+        ///     let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?;
+        ///
+        ///     loop {
+        ///         // Wait for the socket to be readable
+        ///         client.readable().await?;
+        ///
+        ///         let mut buf = Vec::with_capacity(4096);
+        ///
+        ///         // Try to read data, this may still fail with `WouldBlock`
+        ///         // if the readiness event is a false positive.
+        ///         match client.try_read_buf(&mut buf) {
+        ///             Ok(0) => break,
+        ///             Ok(n) => {
+        ///                 println!("read {} bytes", n);
+        ///             }
+        ///             Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+        ///                 continue;
+        ///             }
+        ///             Err(e) => {
+        ///                 return Err(e.into());
+        ///             }
+        ///         }
+        ///     }
+        ///
+        ///     Ok(())
+        /// }
+        /// ```
+        pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> {
+            self.io.registration().try_io(Interest::READABLE, || {
+                use std::io::Read;
+
+                let dst = buf.chunk_mut();
+                let dst =
+                    unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) };
+
+                // Safety: We trust `NamedPipeClient::read` to have filled up `n` bytes in the
+                // buffer.
+                let n = (&*self.io).read(dst)?;
+
+                unsafe {
+                    buf.advance_mut(n);
+                }
+
+                Ok(n)
+            })
+        }
+    }
+
     /// Waits for the pipe to become writable.
     ///
     /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually
diff --git a/third_party/rust_crates/vendor/tokio/src/process/mod.rs b/third_party/rust_crates/vendor/tokio/src/process/mod.rs
index 4e1a21d..719fdee 100644
--- a/third_party/rust_crates/vendor/tokio/src/process/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/process/mod.rs
@@ -111,7 +111,7 @@
 //!     let mut cmd = Command::new("sort");
 //!
 //!     // Specifying that we want pipe both the output and the input.
-//!     // Similarily to capturing the output, by configuring the pipe
+//!     // Similarly to capturing the output, by configuring the pipe
 //!     // to stdin it can now be used as an asynchronous writer.
 //!     cmd.stdout(Stdio::piped());
 //!     cmd.stdin(Stdio::piped());
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/basic_scheduler.rs b/third_party/rust_crates/vendor/tokio/src/runtime/basic_scheduler.rs
index 401f55b..2f0f8d3 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/basic_scheduler.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/basic_scheduler.rs
@@ -5,7 +5,7 @@
 use crate::runtime::context::EnterGuard;
 use crate::runtime::driver::Driver;
 use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task};
-use crate::runtime::Callback;
+use crate::runtime::{Callback, HandleInner};
 use crate::runtime::{MetricsBatch, SchedulerMetrics, WorkerMetrics};
 use crate::sync::notify::Notify;
 use crate::util::atomic_cell::AtomicCell;
@@ -48,7 +48,7 @@
     spawner: Spawner,
 
     /// Current tick
-    tick: u8,
+    tick: u32,
 
     /// Runtime driver
     ///
@@ -57,6 +57,12 @@
 
     /// Metrics batch
     metrics: MetricsBatch,
+
+    /// How many ticks before pulling a task from the global/remote queue?
+    global_queue_interval: u32,
+
+    /// How many ticks before yielding to the driver for timer and I/O events?
+    event_interval: u32,
 }
 
 #[derive(Clone)]
@@ -78,6 +84,9 @@
     /// Indicates whether the blocked on thread was woken.
     woken: AtomicBool,
 
+    /// Handle to I/O driver, timer, blocking pool, ...
+    handle_inner: HandleInner,
+
     /// Callback for a worker parking itself
     before_park: Option<Callback>,
 
@@ -104,23 +113,17 @@
 /// Initial queue capacity.
 const INITIAL_CAPACITY: usize = 64;
 
-/// Max number of tasks to poll per tick.
-#[cfg(loom)]
-const MAX_TASKS_PER_TICK: usize = 4;
-#[cfg(not(loom))]
-const MAX_TASKS_PER_TICK: usize = 61;
-
-/// How often to check the remote queue first.
-const REMOTE_FIRST_INTERVAL: u8 = 31;
-
 // Tracks the current BasicScheduler.
 scoped_thread_local!(static CURRENT: Context);
 
 impl BasicScheduler {
     pub(crate) fn new(
         driver: Driver,
+        handle_inner: HandleInner,
         before_park: Option<Callback>,
         after_unpark: Option<Callback>,
+        global_queue_interval: u32,
+        event_interval: u32,
     ) -> BasicScheduler {
         let unpark = driver.unpark();
 
@@ -130,6 +133,7 @@
                 owned: OwnedTasks::new(),
                 unpark,
                 woken: AtomicBool::new(false),
+                handle_inner,
                 before_park,
                 after_unpark,
                 scheduler_metrics: SchedulerMetrics::new(),
@@ -143,6 +147,8 @@
             tick: 0,
             driver: Some(driver),
             metrics: MetricsBatch::new(),
+            global_queue_interval,
+            event_interval,
         })));
 
         BasicScheduler {
@@ -365,12 +371,12 @@
 
 impl Spawner {
     /// Spawns a future onto the basic scheduler
-    pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+    pub(crate) fn spawn<F>(&self, future: F, id: super::task::Id) -> JoinHandle<F::Output>
     where
         F: crate::future::Future + Send + 'static,
         F::Output: Send + 'static,
     {
-        let (handle, notified) = self.shared.owned.bind(future, self.shared.clone());
+        let (handle, notified) = self.shared.owned.bind(future, self.shared.clone(), id);
 
         if let Some(notified) = notified {
             self.shared.schedule(notified);
@@ -397,6 +403,10 @@
     pub(crate) fn reset_woken(&self) -> bool {
         self.shared.woken.swap(false, AcqRel)
     }
+
+    pub(crate) fn as_handle_inner(&self) -> &HandleInner {
+        &self.shared.handle_inner
+    }
 }
 
 cfg_metrics! {
@@ -505,12 +515,12 @@
                     }
                 }
 
-                for _ in 0..MAX_TASKS_PER_TICK {
+                for _ in 0..core.event_interval {
                     // Get and increment the current tick
                     let tick = core.tick;
                     core.tick = core.tick.wrapping_add(1);
 
-                    let entry = if tick % REMOTE_FIRST_INTERVAL == 0 {
+                    let entry = if tick % core.global_queue_interval == 0 {
                         core.spawner.pop().or_else(|| core.tasks.pop_front())
                     } else {
                         core.tasks.pop_front().or_else(|| core.spawner.pop())
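
The removed constants (`MAX_TASKS_PER_TICK`, `REMOTE_FIRST_INTERVAL`) become the configurable `event_interval` and `global_queue_interval`. A hedged model of the queue-selection rule above (plain functions, not the scheduler's types):

    // Every `global_queue_interval` ticks the global/remote queue is
    // checked first; otherwise the local queue wins, with the other
    // queue as a fallback.
    fn pick(tick: u32, global_queue_interval: u32,
            local: &mut Vec<u32>, global: &mut Vec<u32>) -> Option<u32> {
        if tick % global_queue_interval == 0 {
            global.pop().or_else(|| local.pop())
        } else {
            local.pop().or_else(|| global.pop())
        }
    }

    fn main() {
        let (mut local, mut global) = (vec![1], vec![2]);
        // Tick 0 is a multiple of any interval, so the global queue goes first.
        assert_eq!(pick(0, 31, &mut local, &mut global), Some(2));
        assert_eq!(pick(1, 31, &mut local, &mut global), Some(1));
    }
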
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/blocking/mod.rs b/third_party/rust_crates/vendor/tokio/src/runtime/blocking/mod.rs
index 15fe05c..88d5e6b 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/blocking/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/blocking/mod.rs
@@ -21,28 +21,3 @@
 pub(crate) fn create_blocking_pool(builder: &Builder, thread_cap: usize) -> BlockingPool {
     BlockingPool::new(builder, thread_cap)
 }
-
-/*
-cfg_not_blocking_impl! {
-    use crate::runtime::Builder;
-    use std::time::Duration;
-
-    #[derive(Debug, Clone)]
-    pub(crate) struct BlockingPool {}
-
-    pub(crate) use BlockingPool as Spawner;
-
-    pub(crate) fn create_blocking_pool(_builder: &Builder, _thread_cap: usize) -> BlockingPool {
-        BlockingPool {}
-    }
-
-    impl BlockingPool {
-        pub(crate) fn spawner(&self) -> &BlockingPool {
-            self
-        }
-
-        pub(crate) fn shutdown(&mut self, _duration: Option<Duration>) {
-        }
-    }
-}
-*/
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/blocking/pool.rs b/third_party/rust_crates/vendor/tokio/src/runtime/blocking/pool.rs
index daf1f63..f73868e 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/blocking/pool.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/blocking/pool.rs
@@ -7,7 +7,7 @@
 use crate::runtime::builder::ThreadNameFn;
 use crate::runtime::context;
 use crate::runtime::task::{self, JoinHandle};
-use crate::runtime::{Builder, Callback, Handle};
+use crate::runtime::{Builder, Callback, ToHandle};
 
 use std::collections::{HashMap, VecDeque};
 use std::fmt;
@@ -104,6 +104,7 @@
 /// Runs the provided function on an executor dedicated to blocking operations.
 /// Tasks will be scheduled as non-mandatory, meaning they may not get executed
 /// in case of runtime shutdown.
+#[track_caller]
 pub(crate) fn spawn_blocking<F, R>(func: F) -> JoinHandle<R>
 where
     F: FnOnce() -> R + Send + 'static,
@@ -128,7 +129,7 @@
         R: Send + 'static,
     {
         let rt = context::current();
-        rt.spawn_mandatory_blocking(func)
+        rt.as_inner().spawn_mandatory_blocking(&rt, func)
     }
 }
 
@@ -219,7 +220,7 @@
 // ===== impl Spawner =====
 
 impl Spawner {
-    pub(crate) fn spawn(&self, task: Task, rt: &Handle) -> Result<(), ()> {
+    pub(crate) fn spawn(&self, task: Task, rt: &dyn ToHandle) -> Result<(), ()> {
         let mut shared = self.inner.shared.lock();
 
         if shared.shutdown {
@@ -240,17 +241,29 @@
             if shared.num_th == self.inner.thread_cap {
                 // At max number of threads
             } else {
-                shared.num_th += 1;
                 assert!(shared.shutdown_tx.is_some());
                 let shutdown_tx = shared.shutdown_tx.clone();
 
                 if let Some(shutdown_tx) = shutdown_tx {
                     let id = shared.worker_thread_index;
-                    shared.worker_thread_index += 1;
 
-                    let handle = self.spawn_thread(shutdown_tx, rt, id);
-
-                    shared.worker_threads.insert(id, handle);
+                    match self.spawn_thread(shutdown_tx, rt, id) {
+                        Ok(handle) => {
+                            shared.num_th += 1;
+                            shared.worker_thread_index += 1;
+                            shared.worker_threads.insert(id, handle);
+                        }
+                        Err(ref e) if is_temporary_os_thread_error(e) && shared.num_th > 0 => {
+                            // OS temporarily failed to spawn a new thread.
+                            // The task will be picked up eventually by a currently
+                            // busy thread.
+                        }
+                        Err(e) => {
+                            // The OS refused to spawn the thread and there is no thread
+                            // to pick up the task that has just been pushed to the queue.
+                            panic!("OS can't spawn worker thread: {}", e)
+                        }
+                    }
                 }
             }
         } else {
@@ -270,28 +283,32 @@
     fn spawn_thread(
         &self,
         shutdown_tx: shutdown::Sender,
-        rt: &Handle,
+        rt: &dyn ToHandle,
         id: usize,
-    ) -> thread::JoinHandle<()> {
+    ) -> std::io::Result<thread::JoinHandle<()>> {
         let mut builder = thread::Builder::new().name((self.inner.thread_name)());
 
         if let Some(stack_size) = self.inner.stack_size {
             builder = builder.stack_size(stack_size);
         }
 
-        let rt = rt.clone();
+        let rt = rt.to_handle();
 
-        builder
-            .spawn(move || {
-                // Only the reference should be moved into the closure
-                let _enter = crate::runtime::context::enter(rt.clone());
-                rt.blocking_spawner.inner.run(id);
-                drop(shutdown_tx);
-            })
-            .expect("OS can't spawn a new worker thread")
+        builder.spawn(move || {
+            // Only the reference should be moved into the closure
+            let _enter = crate::runtime::context::enter(rt.clone());
+            rt.as_inner().blocking_spawner.inner.run(id);
+            drop(shutdown_tx);
+        })
     }
 }
 
+// Tells whether the error when spawning a thread is temporary.
+#[inline]
+fn is_temporary_os_thread_error(error: &std::io::Error) -> bool {
+    matches!(error.kind(), std::io::ErrorKind::WouldBlock)
+}
+
 impl Inner {
     fn run(&self, worker_thread_id: usize) {
         if let Some(f) = &self.after_start {
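
A hedged restatement of the new spawn-failure rule (standalone sketch; `live_workers` is an illustrative stand-in for `shared.num_th`):

    use std::io;

    // Mirrors `is_temporary_os_thread_error` plus the `num_th > 0` guard:
    // WouldBlock is survivable only while some worker already exists to
    // eventually pick up the queued task.
    fn spawn_failure_is_fatal(err: &io::Error, live_workers: usize) -> bool {
        !(err.kind() == io::ErrorKind::WouldBlock && live_workers > 0)
    }

    fn main() {
        let e = io::Error::from(io::ErrorKind::WouldBlock);
        assert!(!spawn_failure_is_fatal(&e, 4)); // transient: keep going
        assert!(spawn_failure_is_fatal(&e, 0));  // fatal: panic in the pool
    }
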
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/builder.rs b/third_party/rust_crates/vendor/tokio/src/runtime/builder.rs
index 91c365f..060de48 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/builder.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/builder.rs
@@ -78,6 +78,12 @@
 
     /// Customizable keep alive timeout for BlockingPool
     pub(super) keep_alive: Option<Duration>,
+
+    /// How many ticks before pulling a task from the global/remote queue?
+    pub(super) global_queue_interval: u32,
+
+    /// How many ticks before yielding to the driver for timer and I/O events?
+    pub(super) event_interval: u32,
 }
 
 pub(crate) type ThreadNameFn = std::sync::Arc<dyn Fn() -> String + Send + Sync + 'static>;
@@ -98,7 +104,13 @@
     ///
     /// [`LocalSet`]: crate::task::LocalSet
     pub fn new_current_thread() -> Builder {
-        Builder::new(Kind::CurrentThread)
+        #[cfg(loom)]
+        const EVENT_INTERVAL: u32 = 4;
+        // The number `61` is fairly arbitrary. I believe this value was copied from golang.
+        #[cfg(not(loom))]
+        const EVENT_INTERVAL: u32 = 61;
+
+        Builder::new(Kind::CurrentThread, 31, EVENT_INTERVAL)
     }
 
     /// Returns a new builder with the multi thread scheduler selected.
@@ -107,14 +119,15 @@
     #[cfg(feature = "rt-multi-thread")]
     #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
     pub fn new_multi_thread() -> Builder {
-        Builder::new(Kind::MultiThread)
+        // The number `61` is fairly arbitrary. I believe this value was copied from golang.
+        Builder::new(Kind::MultiThread, 61, 61)
     }
 
     /// Returns a new runtime builder initialized with default configuration
     /// values.
     ///
     /// Configuration methods can be chained on the return value.
-    pub(crate) fn new(kind: Kind) -> Builder {
+    pub(crate) fn new(kind: Kind, global_queue_interval: u32, event_interval: u32) -> Builder {
         Builder {
             kind,
 
@@ -145,6 +158,11 @@
             after_unpark: None,
 
             keep_alive: None,
+
+            // Defaults for these values depend on the scheduler kind, so we get them
+            // as parameters.
+            global_queue_interval,
+            event_interval,
         }
     }
 
@@ -286,7 +304,6 @@
     /// ```
     /// # use tokio::runtime;
     /// # use std::sync::atomic::{AtomicUsize, Ordering};
-    ///
     /// # pub fn main() {
     /// let rt = runtime::Builder::new_multi_thread()
     ///     .thread_name_fn(|| {
@@ -338,7 +355,6 @@
     ///
     /// ```
     /// # use tokio::runtime;
-    ///
     /// # pub fn main() {
     /// let runtime = runtime::Builder::new_multi_thread()
     ///     .on_thread_start(|| {
@@ -364,7 +380,6 @@
     ///
     /// ```
     /// # use tokio::runtime;
-    ///
     /// # pub fn main() {
     /// let runtime = runtime::Builder::new_multi_thread()
     ///     .on_thread_stop(|| {
@@ -473,7 +488,6 @@
     ///
     /// ```
     /// # use tokio::runtime;
-    ///
     /// # pub fn main() {
     /// let runtime = runtime::Builder::new_multi_thread()
     ///     .on_thread_unpark(|| {
@@ -542,7 +556,6 @@
     /// ```
     /// # use tokio::runtime;
     /// # use std::time::Duration;
-    ///
     /// # pub fn main() {
     /// let rt = runtime::Builder::new_multi_thread()
     ///     .thread_keep_alive(Duration::from_millis(100))
@@ -554,33 +567,104 @@
         self
     }
 
+    /// Sets the number of scheduler ticks after which the scheduler will poll the global
+    /// task queue.
+    ///
+    /// A scheduler "tick" roughly corresponds to one `poll` invocation on a task.
+    ///
+    /// By default the global queue interval is:
+    ///
+    /// * `31` for the current-thread scheduler.
+    /// * `61` for the multithreaded scheduler.
+    ///
+    /// Schedulers have a local queue of already-claimed tasks, and a global queue of incoming
+    /// tasks. Setting the interval to a smaller value increases the fairness of the scheduler,
+    /// at the cost of more synchronization overhead. That can be beneficial for prioritizing
+    /// getting started on new work, especially if tasks frequently yield rather than complete
+    /// or await on further I/O. Conversely, a higher value prioritizes existing work, and
+    /// is a good choice when most tasks quickly complete polling.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::runtime;
+    /// # pub fn main() {
+    /// let rt = runtime::Builder::new_multi_thread()
+    ///     .global_queue_interval(31)
+    ///     .build();
+    /// # }
+    /// ```
+    pub fn global_queue_interval(&mut self, val: u32) -> &mut Self {
+        self.global_queue_interval = val;
+        self
+    }
+
+    /// Sets the number of scheduler ticks after which the scheduler will poll for
+    /// external events (timers, I/O, and so on).
+    ///
+    /// A scheduler "tick" roughly corresponds to one `poll` invocation on a task.
+    ///
+    /// By default, the event interval is `61` for all scheduler types.
+    ///
+    /// Setting the event interval determines the effective "priority" of delivering
+    /// these external events (which may wake up additional tasks), compared to
+    /// executing tasks that are currently ready to run. A smaller value is useful
+    /// when tasks frequently spend a long time in polling, or frequently yield,
+    /// which can result in overly long delays picking up I/O events. Conversely,
+    /// picking up new events requires extra synchronization and syscall overhead,
+    /// so if tasks generally complete their polling quickly, a higher event interval
+    /// will minimize that overhead while still keeping the scheduler responsive to
+    /// events.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::runtime;
+    /// # pub fn main() {
+    /// let rt = runtime::Builder::new_multi_thread()
+    ///     .event_interval(31)
+    ///     .build();
+    /// # }
+    /// ```
+    pub fn event_interval(&mut self, val: u32) -> &mut Self {
+        self.event_interval = val;
+        self
+    }
+
     fn build_basic_runtime(&mut self) -> io::Result<Runtime> {
-        use crate::runtime::{BasicScheduler, Kind};
+        use crate::runtime::{BasicScheduler, HandleInner, Kind};
 
         let (driver, resources) = driver::Driver::new(self.get_cfg())?;
 
-        // And now put a single-threaded scheduler on top of the timer. When
-        // there are no futures ready to do something, it'll let the timer or
-        // the reactor to generate some new stimuli for the futures to continue
-        // in their life.
-        let scheduler =
-            BasicScheduler::new(driver, self.before_park.clone(), self.after_unpark.clone());
-        let spawner = Spawner::Basic(scheduler.spawner().clone());
-
         // Blocking pool
         let blocking_pool = blocking::create_blocking_pool(self, self.max_blocking_threads);
         let blocking_spawner = blocking_pool.spawner().clone();
 
+        let handle_inner = HandleInner {
+            io_handle: resources.io_handle,
+            time_handle: resources.time_handle,
+            signal_handle: resources.signal_handle,
+            clock: resources.clock,
+            blocking_spawner,
+        };
+
+        // And now put a single-threaded scheduler on top of the timer. When
+        // there are no futures ready to do something, it'll let the timer or
+        // the reactor generate some new stimuli for the futures to continue
+        // making progress.
+        let scheduler = BasicScheduler::new(
+            driver,
+            handle_inner,
+            self.before_park.clone(),
+            self.after_unpark.clone(),
+            self.global_queue_interval,
+            self.event_interval,
+        );
+        let spawner = Spawner::Basic(scheduler.spawner().clone());
+
         Ok(Runtime {
             kind: Kind::CurrentThread(scheduler),
-            handle: Handle {
-                spawner,
-                io_handle: resources.io_handle,
-                time_handle: resources.time_handle,
-                signal_handle: resources.signal_handle,
-                clock: resources.clock,
-                blocking_spawner,
-            },
+            handle: Handle { spawner },
             blocking_pool,
         })
     }
@@ -662,23 +746,18 @@
     impl Builder {
         fn build_threaded_runtime(&mut self) -> io::Result<Runtime> {
             use crate::loom::sys::num_cpus;
-            use crate::runtime::{Kind, ThreadPool};
-            use crate::runtime::park::Parker;
+            use crate::runtime::{HandleInner, Kind, ThreadPool};
 
             let core_threads = self.worker_threads.unwrap_or_else(num_cpus);
 
             let (driver, resources) = driver::Driver::new(self.get_cfg())?;
 
-            let (scheduler, launch) = ThreadPool::new(core_threads, Parker::new(driver), self.before_park.clone(), self.after_unpark.clone());
-            let spawner = Spawner::ThreadPool(scheduler.spawner().clone());
-
             // Create the blocking pool
-            let blocking_pool = blocking::create_blocking_pool(self, self.max_blocking_threads + core_threads);
+            let blocking_pool =
+                blocking::create_blocking_pool(self, self.max_blocking_threads + core_threads);
             let blocking_spawner = blocking_pool.spawner().clone();
 
-            // Create the runtime handle
-            let handle = Handle {
-                spawner,
+            let handle_inner = HandleInner {
                 io_handle: resources.io_handle,
                 time_handle: resources.time_handle,
                 signal_handle: resources.signal_handle,
@@ -686,6 +765,20 @@
                 blocking_spawner,
             };
 
+            let (scheduler, launch) = ThreadPool::new(
+                core_threads,
+                driver,
+                handle_inner,
+                self.before_park.clone(),
+                self.after_unpark.clone(),
+                self.global_queue_interval,
+                self.event_interval,
+            );
+            let spawner = Spawner::ThreadPool(scheduler.spawner().clone());
+
+            // Create the runtime handle
+            let handle = Handle { spawner };
+
             // Spawn the thread pool workers
             let _enter = crate::runtime::context::enter(handle.clone());
             launch.launch();
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/context.rs b/third_party/rust_crates/vendor/tokio/src/runtime/context.rs
index 1f44a53..aebbe18 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/context.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/context.rs
@@ -26,7 +26,7 @@
     pub(crate) fn io_handle() -> crate::runtime::driver::IoHandle {
         match CONTEXT.try_with(|ctx| {
             let ctx = ctx.borrow();
-            ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).io_handle.clone()
+            ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).as_inner().io_handle.clone()
         }) {
             Ok(io_handle) => io_handle,
             Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR),
@@ -39,7 +39,7 @@
     pub(crate) fn signal_handle() -> crate::runtime::driver::SignalHandle {
         match CONTEXT.try_with(|ctx| {
             let ctx = ctx.borrow();
-            ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).signal_handle.clone()
+            ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).as_inner().signal_handle.clone()
         }) {
             Ok(signal_handle) => signal_handle,
             Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR),
@@ -51,7 +51,7 @@
     pub(crate) fn time_handle() -> crate::runtime::driver::TimeHandle {
         match CONTEXT.try_with(|ctx| {
             let ctx = ctx.borrow();
-            ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).time_handle.clone()
+            ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).as_inner().time_handle.clone()
         }) {
             Ok(time_handle) => time_handle,
             Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR),
@@ -60,7 +60,7 @@
 
     cfg_test_util! {
         pub(crate) fn clock() -> Option<crate::runtime::driver::Clock> {
-            match CONTEXT.try_with(|ctx| (*ctx.borrow()).as_ref().map(|ctx| ctx.clock.clone())) {
+            match CONTEXT.try_with(|ctx| (*ctx.borrow()).as_ref().map(|ctx| ctx.as_inner().clock.clone())) {
                 Ok(clock) => clock,
                 Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR),
             }
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/handle.rs b/third_party/rust_crates/vendor/tokio/src/runtime/handle.rs
index 9dbe677..118d537 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/handle.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/handle.rs
@@ -16,7 +16,11 @@
 #[derive(Debug, Clone)]
 pub struct Handle {
     pub(super) spawner: Spawner,
+}
 
+/// All internal handles that are *not* the scheduler's spawner.
+#[derive(Debug)]
+pub(crate) struct HandleInner {
     /// Handles to the I/O drivers
     #[cfg_attr(
         not(any(feature = "net", feature = "process", all(unix, feature = "signal"))),
@@ -47,6 +51,11 @@
     pub(super) blocking_spawner: blocking::Spawner,
 }
 
+/// Create a new runtime handle.
+pub(crate) trait ToHandle {
+    fn to_handle(&self) -> Handle;
+}
+
 /// Runtime context guard.
 ///
 /// Returned by [`Runtime::enter`] and [`Handle::enter`], the context guard exits
@@ -63,7 +72,8 @@
 impl Handle {
     /// Enters the runtime context. This allows you to construct types that must
     /// have an executor available on creation such as [`Sleep`] or [`TcpStream`].
-    /// It will also allow you to call methods such as [`tokio::spawn`].
+    /// It will also allow you to call methods such as [`tokio::spawn`] and [`Handle::current`]
+    /// without panicking.
     ///
     /// [`Sleep`]: struct@crate::time::Sleep
     /// [`TcpStream`]: struct@crate::net::TcpStream
@@ -80,8 +90,9 @@
     /// # Panic
     ///
     /// This will panic if called outside the context of a Tokio runtime. That means that you must
-    /// call this on one of the threads **being run by the runtime**. Calling this from within a
-    /// thread created by `std::thread::spawn` (for example) will cause a panic.
+    /// call this on one of the threads **being run by the runtime**, or from a thread with an active
+    /// `EnterGuard`. Calling this from within a thread created by `std::thread::spawn` (for example)
+    /// will cause a panic unless that thread has an active `EnterGuard`.
     ///
     /// # Examples
     ///
@@ -105,9 +116,14 @@
     /// # let handle =
     /// thread::spawn(move || {
     ///     // Notice that the handle is created outside of this thread and then moved in
-    ///     handle.spawn(async { /* ... */ })
-    ///     // This next line would cause a panic
-    ///     // let handle2 = Handle::current();
+    ///     handle.spawn(async { /* ... */ });
+    ///     // This next line would cause a panic because we haven't entered the runtime
+    ///     // and created an EnterGuard
+    ///     // let handle2 = Handle::current(); // panic
+    ///     // So we create a guard here with Handle::enter();
+    ///     let _guard = handle.enter();
+    ///     // Now we can call Handle::current();
+    ///     let handle2 = Handle::current();
     /// });
     /// # handle.join().unwrap();
     /// # });
@@ -159,9 +175,7 @@
         F: Future + Send + 'static,
         F::Output: Send + 'static,
     {
-        #[cfg(all(tokio_unstable, feature = "tracing"))]
-        let future = crate::util::trace::task(future, "task", None);
-        self.spawner.spawn(future)
+        self.spawn_named(future, None)
     }
 
     /// Runs the provided function on an executor dedicated to blocking.
@@ -189,85 +203,11 @@
         F: FnOnce() -> R + Send + 'static,
         R: Send + 'static,
     {
-        let (join_handle, _was_spawned) =
-            if cfg!(debug_assertions) && std::mem::size_of::<F>() > 2048 {
-                self.spawn_blocking_inner(Box::new(func), blocking::Mandatory::NonMandatory, None)
-            } else {
-                self.spawn_blocking_inner(func, blocking::Mandatory::NonMandatory, None)
-            };
-
-        join_handle
+        self.as_inner().spawn_blocking(self, func)
     }
 
-    cfg_fs! {
-        #[track_caller]
-        #[cfg_attr(any(
-            all(loom, not(test)), // the function is covered by loom tests
-            test
-        ), allow(dead_code))]
-        pub(crate) fn spawn_mandatory_blocking<F, R>(&self, func: F) -> Option<JoinHandle<R>>
-        where
-            F: FnOnce() -> R + Send + 'static,
-            R: Send + 'static,
-        {
-            let (join_handle, was_spawned) = if cfg!(debug_assertions) && std::mem::size_of::<F>() > 2048 {
-                self.spawn_blocking_inner(
-                    Box::new(func),
-                    blocking::Mandatory::Mandatory,
-                    None
-                )
-            } else {
-                self.spawn_blocking_inner(
-                    func,
-                    blocking::Mandatory::Mandatory,
-                    None
-                )
-            };
-
-            if was_spawned {
-                Some(join_handle)
-            } else {
-                None
-            }
-        }
-    }
-
-    #[track_caller]
-    pub(crate) fn spawn_blocking_inner<F, R>(
-        &self,
-        func: F,
-        is_mandatory: blocking::Mandatory,
-        name: Option<&str>,
-    ) -> (JoinHandle<R>, bool)
-    where
-        F: FnOnce() -> R + Send + 'static,
-        R: Send + 'static,
-    {
-        let fut = BlockingTask::new(func);
-
-        #[cfg(all(tokio_unstable, feature = "tracing"))]
-        let fut = {
-            use tracing::Instrument;
-            let location = std::panic::Location::caller();
-            let span = tracing::trace_span!(
-                target: "tokio::task::blocking",
-                "runtime.spawn",
-                kind = %"blocking",
-                task.name = %name.unwrap_or_default(),
-                "fn" = %std::any::type_name::<F>(),
-                spawn.location = %format_args!("{}:{}:{}", location.file(), location.line(), location.column()),
-            );
-            fut.instrument(span)
-        };
-
-        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
-        let _ = name;
-
-        let (task, handle) = task::unowned(fut, NoopSchedule);
-        let spawned = self
-            .blocking_spawner
-            .spawn(blocking::Task::new(task, is_mandatory), self);
-        (handle, spawned.is_ok())
+    pub(crate) fn as_inner(&self) -> &HandleInner {
+        self.spawner.as_handle_inner()
     }
 
     /// Runs a future to completion on this `Handle`'s associated `Runtime`.
@@ -343,7 +283,8 @@
     #[track_caller]
     pub fn block_on<F: Future>(&self, future: F) -> F::Output {
         #[cfg(all(tokio_unstable, feature = "tracing"))]
-        let future = crate::util::trace::task(future, "block_on", None);
+        let future =
+            crate::util::trace::task(future, "block_on", None, super::task::Id::next().as_u64());
 
         // Enter the **runtime** context. This configures spawning, the current I/O driver, ...
         let _rt_enter = self.enter();
@@ -357,11 +298,29 @@
             .expect("failed to park thread")
     }
 
+    #[track_caller]
+    pub(crate) fn spawn_named<F>(&self, future: F, _name: Option<&str>) -> JoinHandle<F::Output>
+    where
+        F: Future + Send + 'static,
+        F::Output: Send + 'static,
+    {
+        let id = crate::runtime::task::Id::next();
+        #[cfg(all(tokio_unstable, feature = "tracing"))]
+        let future = crate::util::trace::task(future, "task", _name, id.as_u64());
+        self.spawner.spawn(future, id)
+    }
+
     pub(crate) fn shutdown(mut self) {
         self.spawner.shutdown();
     }
 }
 
+impl ToHandle for Handle {
+    fn to_handle(&self) -> Handle {
+        self.clone()
+    }
+}
+
 cfg_metrics! {
     use crate::runtime::RuntimeMetrics;
 
@@ -374,6 +333,100 @@
     }
 }
 
+impl HandleInner {
+    #[track_caller]
+    pub(crate) fn spawn_blocking<F, R>(&self, rt: &dyn ToHandle, func: F) -> JoinHandle<R>
+    where
+        F: FnOnce() -> R + Send + 'static,
+        R: Send + 'static,
+    {
+        let (join_handle, _was_spawned) = if cfg!(debug_assertions)
+            && std::mem::size_of::<F>() > 2048
+        {
+            self.spawn_blocking_inner(Box::new(func), blocking::Mandatory::NonMandatory, None, rt)
+        } else {
+            self.spawn_blocking_inner(func, blocking::Mandatory::NonMandatory, None, rt)
+        };
+
+        join_handle
+    }
+
+    cfg_fs! {
+        #[track_caller]
+        #[cfg_attr(any(
+            all(loom, not(test)), // the function is covered by loom tests
+            test
+        ), allow(dead_code))]
+        pub(crate) fn spawn_mandatory_blocking<F, R>(&self, rt: &dyn ToHandle, func: F) -> Option<JoinHandle<R>>
+        where
+            F: FnOnce() -> R + Send + 'static,
+            R: Send + 'static,
+        {
+            let (join_handle, was_spawned) = if cfg!(debug_assertions) && std::mem::size_of::<F>() > 2048 {
+                self.spawn_blocking_inner(
+                    Box::new(func),
+                    blocking::Mandatory::Mandatory,
+                    None,
+                    rt,
+                )
+            } else {
+                self.spawn_blocking_inner(
+                    func,
+                    blocking::Mandatory::Mandatory,
+                    None,
+                    rt,
+                )
+            };
+
+            if was_spawned {
+                Some(join_handle)
+            } else {
+                None
+            }
+        }
+    }
+
+    #[track_caller]
+    pub(crate) fn spawn_blocking_inner<F, R>(
+        &self,
+        func: F,
+        is_mandatory: blocking::Mandatory,
+        name: Option<&str>,
+        rt: &dyn ToHandle,
+    ) -> (JoinHandle<R>, bool)
+    where
+        F: FnOnce() -> R + Send + 'static,
+        R: Send + 'static,
+    {
+        let fut = BlockingTask::new(func);
+        let id = super::task::Id::next();
+        #[cfg(all(tokio_unstable, feature = "tracing"))]
+        let fut = {
+            use tracing::Instrument;
+            let location = std::panic::Location::caller();
+            let span = tracing::trace_span!(
+                target: "tokio::task::blocking",
+                "runtime.spawn",
+                kind = %"blocking",
+                task.name = %name.unwrap_or_default(),
+                task.id = id.as_u64(),
+                "fn" = %std::any::type_name::<F>(),
+                spawn.location = %format_args!("{}:{}:{}", location.file(), location.line(), location.column()),
+            );
+            fut.instrument(span)
+        };
+
+        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
+        let _ = name;
+
+        let (task, handle) = task::unowned(fut, NoopSchedule, id);
+        let spawned = self
+            .blocking_spawner
+            .spawn(blocking::Task::new(task, is_mandatory), rt);
+        (handle, spawned.is_ok())
+    }
+}
+
 /// Error returned by `try_current` when no Runtime has been started
 #[derive(Debug)]
 pub struct TryCurrentError {
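
A minimal model of the `Handle`/`HandleInner` split introduced above (not tokio's real definitions, which reach `HandleInner` through each scheduler's shared state via `Spawner::as_handle_inner`):

    use std::sync::Arc;

    struct HandleInner; // io_handle, time_handle, blocking_spawner, ...

    #[derive(Clone)]
    struct Spawner {
        handle_inner: Arc<HandleInner>,
    }

    // The public handle now carries only the spawner; every driver
    // handle is reached through `as_inner`.
    #[derive(Clone)]
    struct Handle {
        spawner: Spawner,
    }

    impl Handle {
        fn as_inner(&self) -> &HandleInner {
            &self.spawner.handle_inner
        }
    }

    fn main() {
        let handle = Handle {
            spawner: Spawner { handle_inner: Arc::new(HandleInner) },
        };
        let _inner = handle.as_inner();
    }
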
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/metrics/io.rs b/third_party/rust_crates/vendor/tokio/src/runtime/metrics/io.rs
new file mode 100644
index 0000000..06efdd4
--- /dev/null
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/metrics/io.rs
@@ -0,0 +1,24 @@
+#![cfg_attr(not(feature = "net"), allow(dead_code))]
+
+use crate::loom::sync::atomic::{AtomicU64, Ordering::Relaxed};
+
+#[derive(Default)]
+pub(crate) struct IoDriverMetrics {
+    pub(super) fd_registered_count: AtomicU64,
+    pub(super) fd_deregistered_count: AtomicU64,
+    pub(super) ready_count: AtomicU64,
+}
+
+impl IoDriverMetrics {
+    pub(crate) fn incr_fd_count(&self) {
+        self.fd_registered_count.fetch_add(1, Relaxed);
+    }
+
+    pub(crate) fn dec_fd_count(&self) {
+        self.fd_deregistered_count.fetch_add(1, Relaxed);
+    }
+
+    pub(crate) fn incr_ready_count_by(&self, amt: u64) {
+        self.ready_count.fetch_add(amt, Relaxed);
+    }
+}
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/metrics/mod.rs b/third_party/rust_crates/vendor/tokio/src/runtime/metrics/mod.rs
index ca643a5..4b96f1b 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/metrics/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/metrics/mod.rs
@@ -21,6 +21,11 @@
 
     mod worker;
     pub(crate) use worker::WorkerMetrics;
+
+    cfg_net! {
+        mod io;
+        pub(crate) use io::IoDriverMetrics;
+    }
 }
 
 cfg_not_metrics! {
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/metrics/runtime.rs b/third_party/rust_crates/vendor/tokio/src/runtime/metrics/runtime.rs
index 0f80559..59a8752 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/metrics/runtime.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/metrics/runtime.rs
@@ -386,7 +386,7 @@
     /// Returns the number of tasks currently scheduled in the runtime's
     /// injection queue.
     ///
-    /// Tasks that are spanwed or notified from a non-runtime thread are
+    /// Tasks that are spawned or notified from a non-runtime thread are
     /// scheduled using the runtime's injection queue. This metric returns the
     /// **current** number of tasks pending in the injection queue. As such, the
     /// returned value may increase or decrease as new tasks are scheduled and
@@ -447,3 +447,90 @@
         self.handle.spawner.worker_local_queue_depth(worker)
     }
 }
+
+cfg_net! {
+    impl RuntimeMetrics {
+        /// Returns the number of file descriptors that have been registered with the
+        /// runtime's I/O driver.
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use tokio::runtime::Handle;
+        ///
+        /// #[tokio::main]
+        /// async fn main() {
+        ///     let metrics = Handle::current().metrics();
+        ///
+        ///     let registered_fds = metrics.io_driver_fd_registered_count();
+        ///     println!("{} fds have been registered with the runtime's I/O driver.", registered_fds);
+        ///
+        ///     let deregistered_fds = metrics.io_driver_fd_deregistered_count();
+        ///
+        ///     let current_fd_count = registered_fds - deregistered_fds;
+        ///     println!("{} fds are currently registered by the runtime's I/O driver.", current_fd_count);
+        /// }
+        /// ```
+        pub fn io_driver_fd_registered_count(&self) -> u64 {
+            self.with_io_driver_metrics(|m| {
+                m.fd_registered_count.load(Relaxed)
+            })
+        }
+
+        /// Returns the number of file descriptors that have been deregistered by the
+        /// runtime's I/O driver.
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use tokio::runtime::Handle;
+        ///
+        /// #[tokio::main]
+        /// async fn main() {
+        ///     let metrics = Handle::current().metrics();
+        ///
+        ///     let n = metrics.io_driver_fd_deregistered_count();
+        ///     println!("{} fds have been deregistered by the runtime's I/O driver.", n);
+        /// }
+        /// ```
+        pub fn io_driver_fd_deregistered_count(&self) -> u64 {
+            self.with_io_driver_metrics(|m| {
+                m.fd_deregistered_count.load(Relaxed)
+            })
+        }
+
+        /// Returns the number of ready events processed by the runtime's
+        /// I/O driver.
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use tokio::runtime::Handle;
+        ///
+        /// #[tokio::main]
+        /// async fn main() {
+        ///     let metrics = Handle::current().metrics();
+        ///
+        ///     let n = metrics.io_driver_ready_count();
+        ///     println!("{} ready events procssed by the runtime's I/O driver.", n);
+        /// }
+        /// ```
+        pub fn io_driver_ready_count(&self) -> u64 {
+            self.with_io_driver_metrics(|m| m.ready_count.load(Relaxed))
+        }
+
+        fn with_io_driver_metrics<F>(&self, f: F) -> u64
+        where
+            F: Fn(&super::IoDriverMetrics) -> u64,
+        {
+            // TODO: Investigate whether this should return 0; most of our
+            // metrics increase monotonically, and falling back to 0 here
+            // breaks that guarantee.
+            self.handle
+                .as_inner()
+                .io_handle
+                .as_ref()
+                .map(|h| f(h.metrics()))
+                .unwrap_or(0)
+        }
+    }
+}
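A usage sketch built on the accessors added above, assuming a build with the unstable metrics API plus the `net` and `time` features enabled; note the TODO's caveat that a runtime without an I/O driver reports 0:

```rust
use std::time::Duration;
use tokio::runtime::Handle;

// Estimate I/O readiness-event throughput over a one-second window.
async fn io_events_per_second() -> u64 {
    let metrics = Handle::current().metrics();
    let before = metrics.io_driver_ready_count();
    tokio::time::sleep(Duration::from_secs(1)).await;
    let after = metrics.io_driver_ready_count();
    // Saturating, in case the runtime has no I/O driver and both reads are 0.
    after.saturating_sub(before)
}
```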
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/metrics/worker.rs b/third_party/rust_crates/vendor/tokio/src/runtime/metrics/worker.rs
index c9b85e4..ec58de6 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/metrics/worker.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/metrics/worker.rs
@@ -1,7 +1,7 @@
 use crate::loom::sync::atomic::Ordering::Relaxed;
 use crate::loom::sync::atomic::{AtomicU64, AtomicUsize};
 
-/// Retreive runtime worker metrics.
+/// Retrieve runtime worker metrics.
 ///
 /// **Note**: This is an [unstable API][unstable]. The public API of this type
 /// may break in 1.x releases. See [the documentation on unstable
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/mod.rs b/third_party/rust_crates/vendor/tokio/src/runtime/mod.rs
index 7c381b0..a030ccd 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/mod.rs
@@ -187,6 +187,10 @@
     pub use metrics::RuntimeMetrics;
 
     pub(crate) use metrics::{MetricsBatch, SchedulerMetrics, WorkerMetrics};
+
+    cfg_net! {
+       pub(crate) use metrics::IoDriverMetrics;
+    }
 }
 
 cfg_not_metrics! {
@@ -214,24 +218,20 @@
     pub use self::builder::Builder;
 
     pub(crate) mod context;
-    pub(crate) mod driver;
+    mod driver;
 
     use self::enter::enter;
 
     mod handle;
     pub use handle::{EnterGuard, Handle, TryCurrentError};
+    pub(crate) use handle::{HandleInner, ToHandle};
 
     mod spawner;
     use self::spawner::Spawner;
 }
 
 cfg_rt_multi_thread! {
-    mod park;
-    use park::Parker;
-}
-
-cfg_rt_multi_thread! {
-    mod queue;
+    use driver::Driver;
 
     pub(crate) mod thread_pool;
     use self::thread_pool::ThreadPool;
@@ -435,6 +435,8 @@
         /// When the multi thread scheduler is used this will allow futures
         /// to run within the io driver and timer context of the overall runtime.
         ///
+        /// Any spawned tasks will continue running after `block_on` returns.
+        ///
         /// # Current thread scheduler
         ///
         /// When the current thread scheduler is enabled `block_on`
@@ -444,6 +446,9 @@
         /// When the first `block_on` completes, other threads will be able to
         /// "steal" the driver to allow continued execution of their futures.
         ///
+        /// Any spawned tasks will be suspended after `block_on` returns. Calling
+        /// `block_on` again will resume previously spawned tasks.
+        ///
         /// # Panics
         ///
         /// This function panics if the provided future panics, or if called within an
@@ -467,7 +472,7 @@
         #[track_caller]
         pub fn block_on<F: Future>(&self, future: F) -> F::Output {
             #[cfg(all(tokio_unstable, feature = "tracing"))]
-            let future = crate::util::trace::task(future, "block_on", None);
+            let future = crate::util::trace::task(future, "block_on", None, task::Id::next().as_u64());
 
             let _enter = self.enter();
 
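The two doc additions above describe a behavioral difference worth a concrete example: on the multi-thread scheduler, tasks spawned inside `block_on` keep running after it returns, while the current-thread scheduler suspends them until the next `block_on` call. A minimal sketch:

```rust
use tokio::runtime::Builder;

fn spawn_then_return() {
    let rt = Builder::new_multi_thread().enable_all().build().unwrap();
    rt.block_on(async {
        tokio::spawn(async {
            // On the multi-thread runtime this task keeps running after
            // `block_on` returns; on a current-thread runtime it would be
            // suspended until the next `block_on` call resumes it.
        });
    });
    // The spawned task may still be executing here.
}
```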
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/spawner.rs b/third_party/rust_crates/vendor/tokio/src/runtime/spawner.rs
index d81a806..fb4d7f9 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/spawner.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/spawner.rs
@@ -1,5 +1,6 @@
 use crate::future::Future;
-use crate::runtime::basic_scheduler;
+use crate::runtime::task::Id;
+use crate::runtime::{basic_scheduler, HandleInner};
 use crate::task::JoinHandle;
 
 cfg_rt_multi_thread! {
@@ -23,15 +24,23 @@
         }
     }
 
-    pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+    pub(crate) fn spawn<F>(&self, future: F, id: Id) -> JoinHandle<F::Output>
     where
         F: Future + Send + 'static,
         F::Output: Send + 'static,
     {
         match self {
-            Spawner::Basic(spawner) => spawner.spawn(future),
+            Spawner::Basic(spawner) => spawner.spawn(future, id),
             #[cfg(feature = "rt-multi-thread")]
-            Spawner::ThreadPool(spawner) => spawner.spawn(future),
+            Spawner::ThreadPool(spawner) => spawner.spawn(future, id),
+        }
+    }
+
+    pub(crate) fn as_handle_inner(&self) -> &HandleInner {
+        match self {
+            Spawner::Basic(spawner) => spawner.as_handle_inner(),
+            #[cfg(feature = "rt-multi-thread")]
+            Spawner::ThreadPool(spawner) => spawner.as_handle_inner(),
         }
     }
 }
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/task/abort.rs b/third_party/rust_crates/vendor/tokio/src/runtime/task/abort.rs
new file mode 100644
index 0000000..3188394
--- /dev/null
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/task/abort.rs
@@ -0,0 +1,104 @@
+use crate::runtime::task::{Id, RawTask};
+use std::fmt;
+use std::panic::{RefUnwindSafe, UnwindSafe};
+
+/// An owned permission to abort a spawned task, without awaiting its completion.
+///
+/// Unlike a [`JoinHandle`], an `AbortHandle` does *not* represent the
+/// permission to await the task's completion, only to terminate it.
+///
+/// The task may be aborted by calling the [`AbortHandle::abort`] method.
+/// Dropping an `AbortHandle` releases the permission to terminate the task
+/// --- it does *not* abort the task.
+///
+/// **Note**: This is an [unstable API][unstable]. The public API of this type
+/// may break in 1.x releases. See [the documentation on unstable
+/// features][unstable] for details.
+///
+/// [unstable]: crate#unstable-features
+/// [`JoinHandle`]: crate::task::JoinHandle
+#[cfg_attr(docsrs, doc(cfg(all(feature = "rt", tokio_unstable))))]
+#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))]
+pub struct AbortHandle {
+    raw: Option<RawTask>,
+    id: Id,
+}
+
+impl AbortHandle {
+    pub(super) fn new(raw: Option<RawTask>, id: Id) -> Self {
+        Self { raw, id }
+    }
+
+    /// Abort the task associated with the handle.
+    ///
+    /// Awaiting a cancelled task might complete as usual if the task was
+    /// already completed at the time it was cancelled, but most likely it
+    /// will fail with a [cancelled] `JoinError`.
+    ///
+    /// If the task was already cancelled, such as by [`JoinHandle::abort`],
+    /// this method will do nothing.
+    ///
+    /// [cancelled]: method@super::error::JoinError::is_cancelled
+    /// [`JoinHandle::abort`]: method@super::JoinHandle::abort
+    // the `AbortHandle` type is only publicly exposed when `tokio_unstable` is
+    // enabled, but it is still defined for testing purposes.
+    #[cfg_attr(not(tokio_unstable), allow(unreachable_pub))]
+    pub fn abort(&self) {
+        if let Some(ref raw) = self.raw {
+            raw.remote_abort();
+        }
+    }
+
+    /// Checks if the task associated with this `AbortHandle` has finished.
+    ///
+    /// Please note that this method can return `false` even if `abort` has been
+    /// called on the task. This is because the cancellation process may take
+    /// some time, and this method does not return `true` until it has
+    /// completed.
+    #[cfg_attr(not(tokio_unstable), allow(unreachable_pub))]
+    pub fn is_finished(&self) -> bool {
+        if let Some(raw) = self.raw {
+            let state = raw.header().state.load();
+            state.is_complete()
+        } else {
+            true
+        }
+    }
+
+    /// Returns a [task ID] that uniquely identifies this task relative to other
+    /// currently spawned tasks.
+    ///
+    /// **Note**: This is an [unstable API][unstable]. The public API of this type
+    /// may break in 1.x releases. See [the documentation on unstable
+    /// features][unstable] for details.
+    ///
+    /// [task ID]: crate::task::Id
+    /// [unstable]: crate#unstable-features
+    #[cfg(tokio_unstable)]
+    #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
+    pub fn id(&self) -> super::Id {
+        self.id.clone()
+    }
+}
+
+unsafe impl Send for AbortHandle {}
+unsafe impl Sync for AbortHandle {}
+
+impl UnwindSafe for AbortHandle {}
+impl RefUnwindSafe for AbortHandle {}
+
+impl fmt::Debug for AbortHandle {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("AbortHandle")
+            .field("id", &self.id)
+            .finish()
+    }
+}
+
+impl Drop for AbortHandle {
+    fn drop(&mut self) {
+        if let Some(raw) = self.raw.take() {
+            raw.drop_abort_handle();
+        }
+    }
+}
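A sketch of the `AbortHandle` semantics documented above; it assumes a build compiled with `--cfg tokio_unstable`, where the type is publicly re-exported:

```rust
use tokio::task::AbortHandle;

fn cancel(abort: AbortHandle) {
    // `abort` requests cancellation; if the task had already completed, its
    // output is still delivered through the `JoinHandle`.
    abort.abort();
    // A second call (e.g. racing with `JoinHandle::abort`) is a no-op, and
    // merely dropping the handle would not have aborted the task at all.
    abort.abort();
}
```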
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/task/core.rs b/third_party/rust_crates/vendor/tokio/src/runtime/task/core.rs
index 776e834..548c56d 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/task/core.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/task/core.rs
@@ -13,7 +13,7 @@
 use crate::loom::cell::UnsafeCell;
 use crate::runtime::task::raw::{self, Vtable};
 use crate::runtime::task::state::State;
-use crate::runtime::task::Schedule;
+use crate::runtime::task::{Id, Schedule};
 use crate::util::linked_list;
 
 use std::pin::Pin;
@@ -49,6 +49,9 @@
 
     /// Either the future or the output.
     pub(super) stage: CoreStage<T>,
+
+    /// The task's ID, used for populating `JoinError`s.
+    pub(super) task_id: Id,
 }
 
 /// Crate public as this is also needed by the pool.
@@ -102,7 +105,7 @@
 impl<T: Future, S: Schedule> Cell<T, S> {
     /// Allocates a new task cell, containing the header, trailer, and core
     /// structures.
-    pub(super) fn new(future: T, scheduler: S, state: State) -> Box<Cell<T, S>> {
+    pub(super) fn new(future: T, scheduler: S, state: State, task_id: Id) -> Box<Cell<T, S>> {
         #[cfg(all(tokio_unstable, feature = "tracing"))]
         let id = future.id();
         Box::new(Cell {
@@ -120,6 +123,7 @@
                 stage: CoreStage {
                     stage: UnsafeCell::new(Stage::Running(future)),
                 },
+                task_id,
             },
             trailer: Trailer {
                 waker: UnsafeCell::new(None),
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/task/error.rs b/third_party/rust_crates/vendor/tokio/src/runtime/task/error.rs
index 1a8129b..22b688a 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/task/error.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/task/error.rs
@@ -2,12 +2,13 @@
 use std::fmt;
 use std::io;
 
+use super::Id;
 use crate::util::SyncWrapper;
-
 cfg_rt! {
     /// Task failed to execute to completion.
     pub struct JoinError {
         repr: Repr,
+        id: Id,
     }
 }
 
@@ -17,15 +18,17 @@
 }
 
 impl JoinError {
-    pub(crate) fn cancelled() -> JoinError {
+    pub(crate) fn cancelled(id: Id) -> JoinError {
         JoinError {
             repr: Repr::Cancelled,
+            id,
         }
     }
 
-    pub(crate) fn panic(err: Box<dyn Any + Send + 'static>) -> JoinError {
+    pub(crate) fn panic(id: Id, err: Box<dyn Any + Send + 'static>) -> JoinError {
         JoinError {
             repr: Repr::Panic(SyncWrapper::new(err)),
+            id,
         }
     }
 
@@ -111,13 +114,28 @@
             _ => Err(self),
         }
     }
+
+    /// Returns a [task ID] that identifies the task which errored relative to
+    /// other currently spawned tasks.
+    ///
+    /// **Note**: This is an [unstable API][unstable]. The public API of this type
+    /// may break in 1.x releases. See [the documentation on unstable
+    /// features][unstable] for details.
+    ///
+    /// [task ID]: crate::task::Id
+    /// [unstable]: crate#unstable-features
+    #[cfg(tokio_unstable)]
+    #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
+    pub fn id(&self) -> Id {
+        self.id.clone()
+    }
 }
 
 impl fmt::Display for JoinError {
     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         match &self.repr {
-            Repr::Cancelled => write!(fmt, "cancelled"),
-            Repr::Panic(_) => write!(fmt, "panic"),
+            Repr::Cancelled => write!(fmt, "task {} was cancelled", self.id),
+            Repr::Panic(_) => write!(fmt, "task {} panicked", self.id),
         }
     }
 }
@@ -125,8 +143,8 @@
 impl fmt::Debug for JoinError {
     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         match &self.repr {
-            Repr::Cancelled => write!(fmt, "JoinError::Cancelled"),
-            Repr::Panic(_) => write!(fmt, "JoinError::Panic(...)"),
+            Repr::Cancelled => write!(fmt, "JoinError::Cancelled({:?})", self.id),
+            Repr::Panic(_) => write!(fmt, "JoinError::Panic({:?}, ...)", self.id),
         }
     }
 }
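With the ID threaded into `JoinError`, join failures can now be reported per task. A small sketch using only the stable `JoinError` surface (the `id` accessor itself is gated behind `tokio_unstable`):

```rust
use tokio::task::JoinHandle;

async fn join_and_report(handle: JoinHandle<u64>) {
    match handle.await {
        Ok(v) => println!("task produced {}", v),
        // Display now includes the ID, e.g. "task 7 was cancelled".
        Err(e) if e.is_cancelled() => eprintln!("{}", e),
        // e.g. "task 7 panicked"
        Err(e) => eprintln!("{}", e),
    }
}
```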
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/task/harness.rs b/third_party/rust_crates/vendor/tokio/src/runtime/task/harness.rs
index 261dcce..1d3abab 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/task/harness.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/task/harness.rs
@@ -100,7 +100,8 @@
                 let header_ptr = self.header_ptr();
                 let waker_ref = waker_ref::<T, S>(&header_ptr);
                 let cx = Context::from_waker(&*waker_ref);
-                let res = poll_future(&self.core().stage, cx);
+                let core = self.core();
+                let res = poll_future(&core.stage, core.task_id.clone(), cx);
 
                 if res == Poll::Ready(()) {
                     // The future completed. Move on to complete the task.
@@ -114,14 +115,15 @@
                     TransitionToIdle::Cancelled => {
                         // The transition to idle failed because the task was
                         // cancelled during the poll.
-
-                        cancel_task(&self.core().stage);
+                        let core = self.core();
+                        cancel_task(&core.stage, core.task_id.clone());
                         PollFuture::Complete
                     }
                 }
             }
             TransitionToRunning::Cancelled => {
-                cancel_task(&self.core().stage);
+                let core = self.core();
+                cancel_task(&core.stage, core.task_id.clone());
                 PollFuture::Complete
             }
             TransitionToRunning::Failed => PollFuture::Done,
@@ -144,7 +146,8 @@
 
         // By transitioning the lifecycle to `Running`, we have permission to
         // drop the future.
-        cancel_task(&self.core().stage);
+        let core = self.core();
+        cancel_task(&core.stage, core.task_id.clone());
         self.complete();
     }
 
@@ -432,7 +435,7 @@
 }
 
 /// Cancels the task and store the appropriate error in the stage field.
-fn cancel_task<T: Future>(stage: &CoreStage<T>) {
+fn cancel_task<T: Future>(stage: &CoreStage<T>, id: super::Id) {
     // Drop the future from a panic guard.
     let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
         stage.drop_future_or_output();
@@ -440,17 +443,17 @@
 
     match res {
         Ok(()) => {
-            stage.store_output(Err(JoinError::cancelled()));
+            stage.store_output(Err(JoinError::cancelled(id)));
         }
         Err(panic) => {
-            stage.store_output(Err(JoinError::panic(panic)));
+            stage.store_output(Err(JoinError::panic(id, panic)));
         }
     }
 }
 
 /// Polls the future. If the future completes, the output is written to the
 /// stage field.
-fn poll_future<T: Future>(core: &CoreStage<T>, cx: Context<'_>) -> Poll<()> {
+fn poll_future<T: Future>(core: &CoreStage<T>, id: super::Id, cx: Context<'_>) -> Poll<()> {
     // Poll the future.
     let output = panic::catch_unwind(panic::AssertUnwindSafe(|| {
         struct Guard<'a, T: Future> {
@@ -473,7 +476,7 @@
     let output = match output {
         Ok(Poll::Pending) => return Poll::Pending,
         Ok(Poll::Ready(output)) => Ok(output),
-        Err(panic) => Err(JoinError::panic(panic)),
+        Err(panic) => Err(JoinError::panic(id, panic)),
     };
 
     // Catch and ignore panics if the future panics on drop.
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/task/join.rs b/third_party/rust_crates/vendor/tokio/src/runtime/task/join.rs
index 8beed2e..a04ec95 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/task/join.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/task/join.rs
@@ -1,4 +1,4 @@
-use crate::runtime::task::RawTask;
+use crate::runtime::task::{Id, RawTask};
 
 use std::fmt;
 use std::future::Future;
@@ -144,6 +144,7 @@
     /// [`JoinError`]: crate::task::JoinError
     pub struct JoinHandle<T> {
         raw: Option<RawTask>,
+        id: Id,
         _p: PhantomData<T>,
     }
 }
@@ -155,9 +156,10 @@
 impl<T> RefUnwindSafe for JoinHandle<T> {}
 
 impl<T> JoinHandle<T> {
-    pub(super) fn new(raw: RawTask) -> JoinHandle<T> {
+    pub(super) fn new(raw: RawTask, id: Id) -> JoinHandle<T> {
         JoinHandle {
             raw: Some(raw),
+            id,
             _p: PhantomData,
         }
     }
@@ -201,6 +203,43 @@
         }
     }
 
+    /// Checks if the task associated with this `JoinHandle` has finished.
+    ///
+    /// Please note that this method can return `false` even if [`abort`] has been
+    /// called on the task. This is because the cancellation process may take
+    /// some time, and this method does not return `true` until it has
+    /// completed.
+    ///
+    /// ```rust
+    /// use tokio::time;
+    ///
+    /// # #[tokio::main(flavor = "current_thread")]
+    /// # async fn main() {
+    /// # time::pause();
+    /// let handle1 = tokio::spawn(async {
+    ///     // do some stuff here
+    /// });
+    /// let handle2 = tokio::spawn(async {
+    ///     // do some other stuff here
+    ///     time::sleep(time::Duration::from_secs(10)).await;
+    /// });
+    /// // Abort handle2, then wait for the cancellation to complete
+    /// handle2.abort();
+    /// time::sleep(time::Duration::from_secs(1)).await;
+    /// assert!(handle1.is_finished());
+    /// assert!(handle2.is_finished());
+    /// # }
+    /// ```
+    /// [`abort`]: method@JoinHandle::abort
+    pub fn is_finished(&self) -> bool {
+        if let Some(raw) = self.raw {
+            let state = raw.header().state.load();
+            state.is_complete()
+        } else {
+            true
+        }
+    }
+
     /// Set the waker that is notified when the task completes.
     pub(crate) fn set_join_waker(&mut self, waker: &Waker) {
         if let Some(raw) = self.raw {
@@ -210,6 +249,31 @@
             }
         }
     }
+
+    /// Returns a new `AbortHandle` that can be used to remotely abort this task.
+    #[cfg(any(tokio_unstable, test))]
+    pub(crate) fn abort_handle(&self) -> super::AbortHandle {
+        let raw = self.raw.map(|raw| {
+            raw.ref_inc();
+            raw
+        });
+        super::AbortHandle::new(raw, self.id.clone())
+    }
+
+    /// Returns a [task ID] that uniquely identifies this task relative to other
+    /// currently spawned tasks.
+    ///
+    /// **Note**: This is an [unstable API][unstable]. The public API of this type
+    /// may break in 1.x releases. See [the documentation on unstable
+    /// features][unstable] for details.
+    ///
+    /// [task ID]: crate::task::Id
+    /// [unstable]: crate#unstable-features
+    #[cfg(tokio_unstable)]
+    #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
+    pub fn id(&self) -> super::Id {
+        self.id.clone()
+    }
 }
 
 impl<T> Unpin for JoinHandle<T> {}
@@ -270,6 +334,8 @@
     T: fmt::Debug,
 {
     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt.debug_struct("JoinHandle").finish()
+        fmt.debug_struct("JoinHandle")
+            .field("id", &self.id)
+            .finish()
     }
 }
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/task/list.rs b/third_party/rust_crates/vendor/tokio/src/runtime/task/list.rs
index 7758f8d..7a1dff0 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/task/list.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/task/list.rs
@@ -84,13 +84,14 @@
         &self,
         task: T,
         scheduler: S,
+        id: super::Id,
     ) -> (JoinHandle<T::Output>, Option<Notified<S>>)
     where
         S: Schedule,
         T: Future + Send + 'static,
         T::Output: Send + 'static,
     {
-        let (task, notified, join) = super::new_task(task, scheduler);
+        let (task, notified, join) = super::new_task(task, scheduler, id);
 
         unsafe {
             // safety: We just created the task, so we have exclusive access
@@ -187,13 +188,14 @@
         &self,
         task: T,
         scheduler: S,
+        id: super::Id,
     ) -> (JoinHandle<T::Output>, Option<Notified<S>>)
     where
         S: Schedule,
         T: Future + 'static,
         T::Output: 'static,
     {
-        let (task, notified, join) = super::new_task(task, scheduler);
+        let (task, notified, join) = super::new_task(task, scheduler, id);
 
         unsafe {
             // safety: We just created the task, so we have exclusive access
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/task/mod.rs b/third_party/rust_crates/vendor/tokio/src/runtime/task/mod.rs
index 2a492dc..316b4a4 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/task/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/task/mod.rs
@@ -155,7 +155,14 @@
     pub(super) use self::inject::Inject;
 }
 
+#[cfg(all(feature = "rt", any(tokio_unstable, test)))]
+mod abort;
 mod join;
+
+#[cfg(all(feature = "rt", any(tokio_unstable, test)))]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::abort::AbortHandle;
+
 #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
 pub use self::join::JoinHandle;
 
@@ -177,6 +184,27 @@
 use std::ptr::NonNull;
 use std::{fmt, mem};
 
+/// An opaque ID that uniquely identifies a task relative to all other currently
+/// running tasks.
+///
+/// # Notes
+///
+/// - Task IDs are unique relative to other *currently running* tasks. When a
+///   task completes, the same ID may be used for another task.
+/// - Task IDs are *not* sequential, and do not indicate the order in which
+///   tasks are spawned, what runtime a task is spawned on, or any other data.
+///
+/// **Note**: This is an [unstable API][unstable]. The public API of this type
+/// may break in 1.x releases. See [the documentation on unstable
+/// features][unstable] for details.
+///
+/// [unstable]: crate#unstable-features
+#[cfg_attr(docsrs, doc(cfg(all(feature = "rt", tokio_unstable))))]
+#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))]
+// TODO(eliza): there's almost certainly no reason not to make this `Copy` as well...
+#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+pub struct Id(u64);
+
 /// An owned handle to the task, tracked by ref count.
 #[repr(transparent)]
 pub(crate) struct Task<S: 'static> {
@@ -243,14 +271,15 @@
     /// notification.
     fn new_task<T, S>(
         task: T,
-        scheduler: S
+        scheduler: S,
+        id: Id,
     ) -> (Task<S>, Notified<S>, JoinHandle<T::Output>)
     where
         S: Schedule,
         T: Future + 'static,
         T::Output: 'static,
     {
-        let raw = RawTask::new::<T, S>(task, scheduler);
+        let raw = RawTask::new::<T, S>(task, scheduler, id.clone());
         let task = Task {
             raw,
             _p: PhantomData,
@@ -259,7 +288,7 @@
             raw,
             _p: PhantomData,
         });
-        let join = JoinHandle::new(raw);
+        let join = JoinHandle::new(raw, id);
 
         (task, notified, join)
     }
@@ -268,13 +297,13 @@
     /// only when the task is not going to be stored in an `OwnedTasks` list.
     ///
     /// Currently only blocking tasks use this method.
-    pub(crate) fn unowned<T, S>(task: T, scheduler: S) -> (UnownedTask<S>, JoinHandle<T::Output>)
+    pub(crate) fn unowned<T, S>(task: T, scheduler: S, id: Id) -> (UnownedTask<S>, JoinHandle<T::Output>)
     where
         S: Schedule,
         T: Send + Future + 'static,
         T::Output: Send + 'static,
     {
-        let (task, notified, join) = new_task(task, scheduler);
+        let (task, notified, join) = new_task(task, scheduler, id);
 
         // This transfers the ref-count of task and notified into an UnownedTask.
         // This is valid because an UnownedTask holds two ref-counts.
@@ -443,3 +472,46 @@
         NonNull::from(target.as_ref().owned.with_mut(|ptr| &mut *ptr))
     }
 }
+
+impl fmt::Display for Id {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl Id {
+    // When 64-bit atomics are available, use a static `AtomicU64` counter to
+    // generate task IDs.
+    //
+    // Note(eliza): we _could_ just use `crate::loom::AtomicU64`, which switches
+    // between an atomic and mutex-based implementation here, rather than having
+    // two separate functions for targets with and without 64-bit atomics.
+    // However, because we can't use the mutex-based implementation in a static
+    // initializer directly, the 32-bit impl also has to use a `OnceCell`, and I
+    // thought it was nicer to avoid the `OnceCell` overhead on 64-bit
+    // platforms...
+    cfg_has_atomic_u64! {
+        pub(crate) fn next() -> Self {
+            use std::sync::atomic::{AtomicU64, Ordering::Relaxed};
+            static NEXT_ID: AtomicU64 = AtomicU64::new(1);
+            Self(NEXT_ID.fetch_add(1, Relaxed))
+        }
+    }
+
+    cfg_not_has_atomic_u64! {
+        pub(crate) fn next() -> Self {
+            use once_cell::sync::Lazy;
+            use crate::loom::sync::Mutex;
+
+            static NEXT_ID: Lazy<Mutex<u64>> = Lazy::new(|| Mutex::new(1));
+            let mut lock = NEXT_ID.lock();
+            let id = *lock;
+            *lock += 1;
+            Self(id)
+        }
+    }
+
+    pub(crate) fn as_u64(&self) -> u64 {
+        self.0
+    }
+}
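A standalone sketch of the allocation scheme above for targets with 64-bit atomics: a process-wide relaxed counter that, mirroring the code above, starts at 1, so IDs are unique for the life of the process but deliberately carry no ordering or runtime information:

```rust
use std::sync::atomic::{AtomicU64, Ordering::Relaxed};

fn next_task_id() -> u64 {
    static NEXT_ID: AtomicU64 = AtomicU64::new(1);
    // Relaxed suffices: uniqueness comes from the atomic RMW itself.
    NEXT_ID.fetch_add(1, Relaxed)
}
```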
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/task/raw.rs b/third_party/rust_crates/vendor/tokio/src/runtime/task/raw.rs
index 2e4420b..5555298a 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/task/raw.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/task/raw.rs
@@ -1,5 +1,5 @@
 use crate::future::Future;
-use crate::runtime::task::{Cell, Harness, Header, Schedule, State};
+use crate::runtime::task::{Cell, Harness, Header, Id, Schedule, State};
 
 use std::ptr::NonNull;
 use std::task::{Poll, Waker};
@@ -27,6 +27,9 @@
     /// The join handle has been dropped.
     pub(super) drop_join_handle_slow: unsafe fn(NonNull<Header>),
 
+    /// An abort handle has been dropped.
+    pub(super) drop_abort_handle: unsafe fn(NonNull<Header>),
+
     /// The task is remotely aborted.
     pub(super) remote_abort: unsafe fn(NonNull<Header>),
 
@@ -42,18 +45,19 @@
         try_read_output: try_read_output::<T, S>,
         try_set_join_waker: try_set_join_waker::<T, S>,
         drop_join_handle_slow: drop_join_handle_slow::<T, S>,
+        drop_abort_handle: drop_abort_handle::<T, S>,
         remote_abort: remote_abort::<T, S>,
         shutdown: shutdown::<T, S>,
     }
 }
 
 impl RawTask {
-    pub(super) fn new<T, S>(task: T, scheduler: S) -> RawTask
+    pub(super) fn new<T, S>(task: T, scheduler: S, id: Id) -> RawTask
     where
         T: Future,
         S: Schedule,
     {
-        let ptr = Box::into_raw(Cell::<_, S>::new(task, scheduler, State::new()));
+        let ptr = Box::into_raw(Cell::<_, S>::new(task, scheduler, State::new(), id));
         let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) };
 
         RawTask { ptr }
@@ -104,6 +108,11 @@
         unsafe { (vtable.drop_join_handle_slow)(self.ptr) }
     }
 
+    pub(super) fn drop_abort_handle(self) {
+        let vtable = self.header().vtable;
+        unsafe { (vtable.drop_abort_handle)(self.ptr) }
+    }
+
     pub(super) fn shutdown(self) {
         let vtable = self.header().vtable;
         unsafe { (vtable.shutdown)(self.ptr) }
@@ -113,6 +122,13 @@
         let vtable = self.header().vtable;
         unsafe { (vtable.remote_abort)(self.ptr) }
     }
+
+    /// Increment the task's reference count.
+    ///
+    /// Currently, this is used only when creating an `AbortHandle`.
+    pub(super) fn ref_inc(self) {
+        self.header().state.ref_inc();
+    }
 }
 
 impl Clone for RawTask {
@@ -154,6 +170,11 @@
     harness.drop_join_handle_slow()
 }
 
+unsafe fn drop_abort_handle<T: Future, S: Schedule>(ptr: NonNull<Header>) {
+    let harness = Harness::<T, S>::from_raw(ptr);
+    harness.drop_reference();
+}
+
 unsafe fn remote_abort<T: Future, S: Schedule>(ptr: NonNull<Header>) {
     let harness = Harness::<T, S>::from_raw(ptr);
     harness.remote_abort()
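The new `drop_abort_handle` slot follows the crate's hand-rolled vtable pattern: one `&'static` table of type-erased function pointers per concrete task type, reached through the shared `Header`. A reduced sketch of that pattern (not the full task layout):

```rust
use std::ptr::NonNull;

struct Header {
    vtable: &'static Vtable,
}

struct Vtable {
    drop_abort_handle: unsafe fn(NonNull<Header>),
}

// One table per concrete `T`; the reference is promoted to `'static`
// because every field is a constant function pointer.
fn vtable<T>() -> &'static Vtable {
    &Vtable {
        drop_abort_handle: drop_abort_handle::<T>,
    }
}

unsafe fn drop_abort_handle<T>(_ptr: NonNull<Header>) {
    // In tokio this re-materializes the typed harness from the pointer and
    // drops one reference count.
}
```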
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/tests/loom_join_set.rs b/third_party/rust_crates/vendor/tokio/src/runtime/tests/loom_join_set.rs
index e87ddb0..392b5bf 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/tests/loom_join_set.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/tests/loom_join_set.rs
@@ -61,15 +61,15 @@
                 set.abort_all();
 
                 match set.join_one().await {
-                    Ok(Some(())) => complete_happened.store(true, SeqCst),
-                    Err(err) if err.is_cancelled() => cancel_happened.store(true, SeqCst),
-                    Err(err) => panic!("fail: {}", err),
-                    Ok(None) => {
+                    Some(Ok(())) => complete_happened.store(true, SeqCst),
+                    Some(Err(err)) if err.is_cancelled() => cancel_happened.store(true, SeqCst),
+                    Some(Err(err)) => panic!("fail: {}", err),
+                    None => {
                         unreachable!("Aborting the task does not remove it from the JoinSet.")
                     }
                 }
 
-                assert!(matches!(set.join_one().await, Ok(None)));
+                assert!(matches!(set.join_one().await, None));
             });
 
             drop(set);
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/tests/loom_queue.rs b/third_party/rust_crates/vendor/tokio/src/runtime/tests/loom_queue.rs
index b5f78d7..d0ebf5d 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/tests/loom_queue.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/tests/loom_queue.rs
@@ -1,6 +1,7 @@
 use crate::runtime::blocking::NoopSchedule;
 use crate::runtime::task::Inject;
-use crate::runtime::{queue, MetricsBatch};
+use crate::runtime::thread_pool::queue;
+use crate::runtime::MetricsBatch;
 
 use loom::thread;
 
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/tests/mod.rs b/third_party/rust_crates/vendor/tokio/src/runtime/tests/mod.rs
index 4b49698a..08724d4 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/tests/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/tests/mod.rs
@@ -2,7 +2,7 @@
 
 mod unowned_wrapper {
     use crate::runtime::blocking::NoopSchedule;
-    use crate::runtime::task::{JoinHandle, Notified};
+    use crate::runtime::task::{Id, JoinHandle, Notified};
 
     #[cfg(all(tokio_unstable, feature = "tracing"))]
     pub(crate) fn unowned<T>(task: T) -> (Notified<NoopSchedule>, JoinHandle<T::Output>)
@@ -13,7 +13,7 @@
         use tracing::Instrument;
         let span = tracing::trace_span!("test_span");
         let task = task.instrument(span);
-        let (task, handle) = crate::runtime::task::unowned(task, NoopSchedule);
+        let (task, handle) = crate::runtime::task::unowned(task, NoopSchedule, Id::next());
         (task.into_notified(), handle)
     }
 
@@ -23,7 +23,7 @@
         T: std::future::Future + Send + 'static,
         T::Output: Send + 'static,
     {
-        let (task, handle) = crate::runtime::task::unowned(task, NoopSchedule);
+        let (task, handle) = crate::runtime::task::unowned(task, NoopSchedule, Id::next());
         (task.into_notified(), handle)
     }
 }
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/tests/queue.rs b/third_party/rust_crates/vendor/tokio/src/runtime/tests/queue.rs
index 0fd1e0c..2bdaecf 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/tests/queue.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/tests/queue.rs
@@ -1,5 +1,5 @@
-use crate::runtime::queue;
 use crate::runtime::task::{self, Inject, Schedule, Task};
+use crate::runtime::thread_pool::queue;
 use crate::runtime::MetricsBatch;
 
 use std::thread;
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/tests/task.rs b/third_party/rust_crates/vendor/tokio/src/runtime/tests/task.rs
index 04e1b56..173e5b0 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/tests/task.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/tests/task.rs
@@ -1,5 +1,5 @@
 use crate::runtime::blocking::NoopSchedule;
-use crate::runtime::task::{self, unowned, JoinHandle, OwnedTasks, Schedule, Task};
+use crate::runtime::task::{self, unowned, Id, JoinHandle, OwnedTasks, Schedule, Task};
 use crate::util::TryLock;
 
 use std::collections::VecDeque;
@@ -55,6 +55,7 @@
             unreachable!()
         },
         NoopSchedule,
+        Id::next(),
     );
     drop(notified);
     handle.assert_not_dropped();
@@ -71,6 +72,7 @@
             unreachable!()
         },
         NoopSchedule,
+        Id::next(),
     );
     drop(join);
     handle.assert_not_dropped();
@@ -78,6 +80,46 @@
     handle.assert_dropped();
 }
 
+#[test]
+fn drop_abort_handle1() {
+    let (ad, handle) = AssertDrop::new();
+    let (notified, join) = unowned(
+        async {
+            drop(ad);
+            unreachable!()
+        },
+        NoopSchedule,
+        Id::next(),
+    );
+    let abort = join.abort_handle();
+    drop(join);
+    handle.assert_not_dropped();
+    drop(notified);
+    handle.assert_not_dropped();
+    drop(abort);
+    handle.assert_dropped();
+}
+
+#[test]
+fn drop_abort_handle2() {
+    let (ad, handle) = AssertDrop::new();
+    let (notified, join) = unowned(
+        async {
+            drop(ad);
+            unreachable!()
+        },
+        NoopSchedule,
+        Id::next(),
+    );
+    let abort = join.abort_handle();
+    drop(notified);
+    handle.assert_not_dropped();
+    drop(abort);
+    handle.assert_not_dropped();
+    drop(join);
+    handle.assert_dropped();
+}
+
 // Shutting down through Notified works
 #[test]
 fn create_shutdown1() {
@@ -88,6 +130,7 @@
             unreachable!()
         },
         NoopSchedule,
+        Id::next(),
     );
     drop(join);
     handle.assert_not_dropped();
@@ -104,6 +147,7 @@
             unreachable!()
         },
         NoopSchedule,
+        Id::next(),
     );
     handle.assert_not_dropped();
     notified.shutdown();
@@ -113,7 +157,7 @@
 
 #[test]
 fn unowned_poll() {
-    let (task, _) = unowned(async {}, NoopSchedule);
+    let (task, _) = unowned(async {}, NoopSchedule, Id::next());
     task.run();
 }
 
@@ -228,7 +272,7 @@
         T: 'static + Send + Future,
         T::Output: 'static + Send,
     {
-        let (handle, notified) = self.0.owned.bind(future, self.clone());
+        let (handle, notified) = self.0.owned.bind(future, self.clone(), Id::next());
 
         if let Some(notified) = notified {
             self.schedule(notified);
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/tests/task_combinations.rs b/third_party/rust_crates/vendor/tokio/src/runtime/tests/task_combinations.rs
index 76ce233..5c7a0b0 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/tests/task_combinations.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/tests/task_combinations.rs
@@ -3,6 +3,7 @@
 use std::pin::Pin;
 use std::task::{Context, Poll};
 
+use crate::runtime::task::AbortHandle;
 use crate::runtime::Builder;
 use crate::sync::oneshot;
 use crate::task::JoinHandle;
@@ -56,6 +57,12 @@
     AbortedAfterConsumeOutput = 4,
 }
 
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum CombiAbortSource {
+    JoinHandle,
+    AbortHandle,
+}
+
 #[test]
 fn test_combinations() {
     let mut rt = &[
@@ -90,6 +97,13 @@
         CombiAbort::AbortedAfterFinish,
         CombiAbort::AbortedAfterConsumeOutput,
     ];
+    let ah = [
+        None,
+        Some(CombiJoinHandle::DropImmediately),
+        Some(CombiJoinHandle::DropFirstPoll),
+        Some(CombiJoinHandle::DropAfterNoConsume),
+        Some(CombiJoinHandle::DropAfterConsume),
+    ];
 
     for rt in rt.iter().copied() {
         for ls in ls.iter().copied() {
@@ -98,7 +112,34 @@
                     for ji in ji.iter().copied() {
                         for jh in jh.iter().copied() {
                             for abort in abort.iter().copied() {
-                                test_combination(rt, ls, task, output, ji, jh, abort);
+                                // abort via join handle --- abort handles
+                                // may be dropped at any point
+                                for ah in ah.iter().copied() {
+                                    test_combination(
+                                        rt,
+                                        ls,
+                                        task,
+                                        output,
+                                        ji,
+                                        jh,
+                                        ah,
+                                        abort,
+                                        CombiAbortSource::JoinHandle,
+                                    );
+                                }
+                                // if aborting via AbortHandle, it will
+                                // never be dropped.
+                                test_combination(
+                                    rt,
+                                    ls,
+                                    task,
+                                    output,
+                                    ji,
+                                    jh,
+                                    None,
+                                    abort,
+                                    CombiAbortSource::AbortHandle,
+                                );
                             }
                         }
                     }
@@ -108,6 +149,7 @@
     }
 }
 
+#[allow(clippy::too_many_arguments)]
 fn test_combination(
     rt: CombiRuntime,
     ls: CombiLocalSet,
@@ -115,12 +157,24 @@
     output: CombiOutput,
     ji: CombiJoinInterest,
     jh: CombiJoinHandle,
+    ah: Option<CombiJoinHandle>,
     abort: CombiAbort,
+    abort_src: CombiAbortSource,
 ) {
-    if (jh as usize) < (abort as usize) {
-        // drop before abort not possible
-        return;
+    match (abort_src, ah) {
+        (CombiAbortSource::JoinHandle, _) if (jh as usize) < (abort as usize) => {
+            // join handle dropped prior to abort
+            return;
+        }
+        (CombiAbortSource::AbortHandle, Some(_)) => {
+            // abort handle dropped, we can't abort through the
+            // abort handle
+            return;
+        }
+
+        _ => {}
     }
+
     if (task == CombiTask::PanicOnDrop) && (output == CombiOutput::PanicOnDrop) {
         // this causes double panic
         return;
@@ -130,7 +184,7 @@
         return;
     }
 
-    println!("Runtime {:?}, LocalSet {:?}, Task {:?}, Output {:?}, JoinInterest {:?}, JoinHandle {:?}, Abort {:?}", rt, ls, task, output, ji, jh, abort);
+    println!("Runtime {:?}, LocalSet {:?}, Task {:?}, Output {:?}, JoinInterest {:?}, JoinHandle {:?}, AbortHandle {:?}, Abort {:?} ({:?})", rt, ls, task, output, ji, jh, ah, abort, abort_src);
 
     // A runtime optionally with a LocalSet
     struct Rt {
@@ -282,8 +336,24 @@
         );
     }
 
+    // If we will either abort the task via an abort handle, or drop an
+    // abort handle without using it, create the handle now.
+    let mut abort_handle = if ah.is_some() || abort_src == CombiAbortSource::AbortHandle {
+        handle.as_ref().map(JoinHandle::abort_handle)
+    } else {
+        None
+    };
+
+    let do_abort = |abort_handle: &mut Option<AbortHandle>,
+                    join_handle: Option<&mut JoinHandle<_>>| {
+        match abort_src {
+            CombiAbortSource::AbortHandle => abort_handle.take().unwrap().abort(),
+            CombiAbortSource::JoinHandle => join_handle.unwrap().abort(),
+        }
+    };
+
     if abort == CombiAbort::AbortedImmediately {
-        handle.as_mut().unwrap().abort();
+        do_abort(&mut abort_handle, handle.as_mut());
         aborted = true;
     }
     if jh == CombiJoinHandle::DropImmediately {
@@ -301,12 +371,15 @@
     }
 
     if abort == CombiAbort::AbortedFirstPoll {
-        handle.as_mut().unwrap().abort();
+        do_abort(&mut abort_handle, handle.as_mut());
         aborted = true;
     }
     if jh == CombiJoinHandle::DropFirstPoll {
         drop(handle.take().unwrap());
     }
+    if ah == Some(CombiJoinHandle::DropFirstPoll) {
+        drop(abort_handle.take().unwrap());
+    }
 
     // Signal the future that it can return now
     let _ = on_complete.send(());
@@ -318,23 +391,42 @@
 
     if abort == CombiAbort::AbortedAfterFinish {
         // Don't set aborted to true here as the task already finished
-        handle.as_mut().unwrap().abort();
+        do_abort(&mut abort_handle, handle.as_mut());
     }
     if jh == CombiJoinHandle::DropAfterNoConsume {
-        // The runtime will usually have dropped every ref-count at this point,
-        // in which case dropping the JoinHandle drops the output.
-        //
-        // (But it might race and still hold a ref-count)
-        let panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+        if ah == Some(CombiJoinHandle::DropAfterNoConsume) {
             drop(handle.take().unwrap());
-        }));
-        if panic.is_err() {
-            assert!(
-                (output == CombiOutput::PanicOnDrop)
-                    && (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop))
-                    && !aborted,
-                "Dropping JoinHandle shouldn't panic here"
-            );
+            // The runtime will usually have dropped every ref-count at this point,
+            // in which case dropping the AbortHandle drops the output.
+            //
+            // (But it might race and still hold a ref-count)
+            let panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+                drop(abort_handle.take().unwrap());
+            }));
+            if panic.is_err() {
+                assert!(
+                    (output == CombiOutput::PanicOnDrop)
+                        && (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop))
+                        && !aborted,
+                    "Dropping AbortHandle shouldn't panic here"
+                );
+            }
+        } else {
+            // The runtime will usually have dropped every ref-count at this point,
+            // in which case dropping the JoinHandle drops the output.
+            //
+            // (But it might race and still hold a ref-count)
+            let panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+                drop(handle.take().unwrap());
+            }));
+            if panic.is_err() {
+                assert!(
+                    (output == CombiOutput::PanicOnDrop)
+                        && (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop))
+                        && !aborted,
+                    "Dropping JoinHandle shouldn't panic here"
+                );
+            }
         }
     }
 
@@ -362,11 +454,15 @@
             _ => unreachable!(),
         }
 
-        let handle = handle.take().unwrap();
+        let mut handle = handle.take().unwrap();
         if abort == CombiAbort::AbortedAfterConsumeOutput {
-            handle.abort();
+            do_abort(&mut abort_handle, Some(&mut handle));
         }
         drop(handle);
+
+        if ah == Some(CombiJoinHandle::DropAfterConsume) {
+            drop(abort_handle.take());
+        }
     }
 
     // The output should have been dropped now. Check whether the output
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/mod.rs b/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/mod.rs
index d3f4651..5ac71e1 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/mod.rs
@@ -3,14 +3,19 @@
 mod idle;
 use self::idle::Idle;
 
+mod park;
+pub(crate) use park::{Parker, Unparker};
+
+pub(super) mod queue;
+
 mod worker;
 pub(crate) use worker::Launch;
 
 pub(crate) use worker::block_in_place;
 
 use crate::loom::sync::Arc;
-use crate::runtime::task::JoinHandle;
-use crate::runtime::{Callback, Parker};
+use crate::runtime::task::{self, JoinHandle};
+use crate::runtime::{Callback, Driver, HandleInner};
 
 use std::fmt;
 use std::future::Future;
@@ -42,11 +47,23 @@
 impl ThreadPool {
     pub(crate) fn new(
         size: usize,
-        parker: Parker,
+        driver: Driver,
+        handle_inner: HandleInner,
         before_park: Option<Callback>,
         after_unpark: Option<Callback>,
+        global_queue_interval: u32,
+        event_interval: u32,
     ) -> (ThreadPool, Launch) {
-        let (shared, launch) = worker::create(size, parker, before_park, after_unpark);
+        let parker = Parker::new(driver);
+        let (shared, launch) = worker::create(
+            size,
+            parker,
+            handle_inner,
+            before_park,
+            after_unpark,
+            global_queue_interval,
+            event_interval,
+        );
         let spawner = Spawner { shared };
         let thread_pool = ThreadPool { spawner };
 
@@ -90,17 +107,21 @@
 
 impl Spawner {
     /// Spawns a future onto the thread pool
-    pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+    pub(crate) fn spawn<F>(&self, future: F, id: task::Id) -> JoinHandle<F::Output>
     where
         F: crate::future::Future + Send + 'static,
         F::Output: Send + 'static,
     {
-        worker::Shared::bind_new_task(&self.shared, future)
+        worker::Shared::bind_new_task(&self.shared, future, id)
     }
 
     pub(crate) fn shutdown(&mut self) {
         self.shared.close();
     }
+
+    pub(crate) fn as_handle_inner(&self) -> &HandleInner {
+        self.shared.as_handle_inner()
+    }
 }
 
 cfg_metrics! {
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/park.rs b/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/park.rs
similarity index 100%
rename from third_party/rust_crates/vendor/tokio/src/runtime/park.rs
rename to third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/park.rs
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/queue.rs b/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/queue.rs
similarity index 97%
rename from third_party/rust_crates/vendor/tokio/src/runtime/queue.rs
rename to third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/queue.rs
index ad9085a..1f5841d 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/queue.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/queue.rs
@@ -11,14 +11,14 @@
 use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
 
 /// Producer handle. May only be used from a single thread.
-pub(super) struct Local<T: 'static> {
+pub(crate) struct Local<T: 'static> {
     inner: Arc<Inner<T>>,
 }
 
 /// Consumer handle. May be used from many threads.
-pub(super) struct Steal<T: 'static>(Arc<Inner<T>>);
+pub(crate) struct Steal<T: 'static>(Arc<Inner<T>>);
 
-pub(super) struct Inner<T: 'static> {
+pub(crate) struct Inner<T: 'static> {
     /// Concurrently updated by many threads.
     ///
     /// Contains two `u16` values. The LSB byte is the "real" head of the queue.
@@ -65,7 +65,7 @@
 }
 
 /// Create a new local run-queue
-pub(super) fn local<T: 'static>() -> (Steal<T>, Local<T>) {
+pub(crate) fn local<T: 'static>() -> (Steal<T>, Local<T>) {
     let mut buffer = Vec::with_capacity(LOCAL_QUEUE_CAPACITY);
 
     for _ in 0..LOCAL_QUEUE_CAPACITY {
@@ -89,7 +89,7 @@
 
 impl<T> Local<T> {
     /// Returns true if the queue has entries that can be stolen.
-    pub(super) fn is_stealable(&self) -> bool {
+    pub(crate) fn is_stealable(&self) -> bool {
         !self.inner.is_empty()
     }
 
@@ -97,12 +97,12 @@
     ///
     /// Separate from is_stealable so that refactors of is_stealable to "protect"
     /// some tasks from stealing won't affect this
-    pub(super) fn has_tasks(&self) -> bool {
+    pub(crate) fn has_tasks(&self) -> bool {
         !self.inner.is_empty()
     }
 
     /// Pushes a task to the back of the local queue, skipping the LIFO slot.
-    pub(super) fn push_back(
+    pub(crate) fn push_back(
         &mut self,
         mut task: task::Notified<T>,
         inject: &Inject<T>,
@@ -259,7 +259,7 @@
     }
 
     /// Pops a task from the local queue.
-    pub(super) fn pop(&mut self) -> Option<task::Notified<T>> {
+    pub(crate) fn pop(&mut self) -> Option<task::Notified<T>> {
         let mut head = self.inner.head.load(Acquire);
 
         let idx = loop {
@@ -301,12 +301,12 @@
 }
 
 impl<T> Steal<T> {
-    pub(super) fn is_empty(&self) -> bool {
+    pub(crate) fn is_empty(&self) -> bool {
         self.0.is_empty()
     }
 
     /// Steals half the tasks from self and places them into `dst`.
-    pub(super) fn steal_into(
+    pub(crate) fn steal_into(
         &self,
         dst: &mut Local<T>,
         dst_metrics: &mut MetricsBatch,
diff --git a/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/worker.rs b/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/worker.rs
index 7e49897..b01c5bc 100644
--- a/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/worker.rs
+++ b/third_party/rust_crates/vendor/tokio/src/runtime/thread_pool/worker.rs
@@ -63,10 +63,9 @@
 use crate::park::{Park, Unpark};
 use crate::runtime;
 use crate::runtime::enter::EnterContext;
-use crate::runtime::park::{Parker, Unparker};
 use crate::runtime::task::{Inject, JoinHandle, OwnedTasks};
-use crate::runtime::thread_pool::Idle;
-use crate::runtime::{queue, task, Callback, MetricsBatch, SchedulerMetrics, WorkerMetrics};
+use crate::runtime::thread_pool::{queue, Idle, Parker, Unparker};
+use crate::runtime::{task, Callback, HandleInner, MetricsBatch, SchedulerMetrics, WorkerMetrics};
 use crate::util::atomic_cell::AtomicCell;
 use crate::util::FastRand;
 
@@ -88,7 +87,7 @@
 /// Core data
 struct Core {
     /// Used to schedule bookkeeping tasks every so often.
-    tick: u8,
+    tick: u32,
 
     /// When a task is scheduled from a worker, it is stored in this slot. The
     /// worker will check this slot for a task **before** checking the run
@@ -118,15 +117,26 @@
 
     /// Fast random number generator.
     rand: FastRand,
+
+    /// How many ticks before pulling a task from the global/remote queue?
+    global_queue_interval: u32,
+
+    /// How many ticks before yielding to the driver for timer and I/O events?
+    event_interval: u32,
 }
 
 /// State shared across all workers
 pub(super) struct Shared {
+    /// Handle to the I/O driver, timer, blocking spawner, ...
+    handle_inner: HandleInner,
+
     /// Per-worker remote state. All other workers have access to this, and
     /// it is how they communicate with each other.
     remotes: Box<[Remote]>,
 
-    /// Submits work to the scheduler while **not** currently on a worker thread.
+    /// Global task queue used to:
+    ///  1. Submit work to the scheduler while **not** currently on a worker thread.
+    ///  2. Submit work to the scheduler when a worker run queue is saturated.
     inject: Inject<Arc<Shared>>,
 
     /// Coordinates idle workers
@@ -191,12 +201,15 @@
 pub(super) fn create(
     size: usize,
     park: Parker,
+    handle_inner: HandleInner,
     before_park: Option<Callback>,
     after_unpark: Option<Callback>,
+    global_queue_interval: u32,
+    event_interval: u32,
 ) -> (Arc<Shared>, Launch) {
-    let mut cores = vec![];
-    let mut remotes = vec![];
-    let mut worker_metrics = vec![];
+    let mut cores = Vec::with_capacity(size);
+    let mut remotes = Vec::with_capacity(size);
+    let mut worker_metrics = Vec::with_capacity(size);
 
     // Create the local queues
     for _ in 0..size {
@@ -214,6 +227,8 @@
             park: Some(park),
             metrics: MetricsBatch::new(),
             rand: FastRand::new(seed()),
+            global_queue_interval,
+            event_interval,
         }));
 
         remotes.push(Remote { steal, unpark });
@@ -221,6 +236,7 @@
     }
 
     let shared = Arc::new(Shared {
+        handle_inner,
         remotes: remotes.into_boxed_slice(),
         inject: Inject::new(),
         idle: Idle::new(size),
@@ -340,12 +356,6 @@
     }
 }
 
-/// After how many ticks is the global queue polled. This helps to ensure
-/// fairness.
-///
-/// The number is fairly arbitrary. I believe this value was copied from golang.
-const GLOBAL_POLL_INTERVAL: u8 = 61;
-
 impl Launch {
     pub(crate) fn launch(mut self) {
         for worker in self.0.drain(..) {
@@ -458,7 +468,7 @@
     }
 
     fn maintenance(&self, mut core: Box<Core>) -> Box<Core> {
-        if core.tick % GLOBAL_POLL_INTERVAL == 0 {
+        if core.tick % core.event_interval == 0 {
             // Call `park` with a 0 timeout. This enables the I/O driver, timer, ...
             // to run without actually putting the thread to sleep.
             core = self.park_timeout(core, Some(Duration::from_millis(0)));
@@ -470,6 +480,17 @@
         core
     }
 
+    /// Parks the worker thread while waiting for tasks to execute.
+    ///
+    /// This function checks that there is indeed no more work left to be done
+    /// before parking. Note that, before parking, the worker thread will try
+    /// to take ownership of the driver (I/O, time) and dispatch any events
+    /// that might have fired. Whenever a worker thread executes the driver
+    /// loop, all woken tasks are scheduled in its own local queue until the
+    /// queue saturates (ntasks > LOCAL_QUEUE_CAPACITY). When the local queue
+    /// is saturated, the overflow tasks are added to the injection queue, from
+    /// where other workers can pick them up. Finally, we rely on the
+    /// work-stealing algorithm to spread the tasks amongst workers once all
+    /// the I/O events have been dispatched.
     fn park(&self, mut core: Box<Core>) -> Box<Core> {
         if let Some(f) = &self.worker.shared.before_park {
             f();
@@ -534,7 +555,7 @@
 
     /// Return the next notified task available to this worker.
     fn next_task(&mut self, worker: &Worker) -> Option<Notified> {
-        if self.tick % GLOBAL_POLL_INTERVAL == 0 {
+        if self.tick % self.global_queue_interval == 0 {
             worker.inject().pop().or_else(|| self.next_local_task())
         } else {
             self.next_local_task().or_else(|| worker.inject().pop())
@@ -545,6 +566,11 @@
         self.lifo_slot.take().or_else(|| self.run_queue.pop())
     }
 
+    /// Attempts to steal tasks from another worker.
+    ///
+    /// Note: a worker will only attempt to steal if fewer than half of the
+    /// workers are already searching for tasks to steal. The idea is to make
+    /// sure not all workers are trying to steal at the same time.
     fn steal_work(&mut self, worker: &Worker) -> Option<Notified> {
         if !self.transition_to_searching(worker) {
             return None;
@@ -594,7 +620,7 @@
 
     /// Prepares the worker state for parking.
     ///
-    /// Returns true if the transition happend, false if there is work to do first.
+    /// Returns true if the transition happened, false if there is work to do first.
     fn transition_to_parked(&mut self, worker: &Worker) -> bool {
         // Workers should not park if they have work to do
         if self.lifo_slot.is_some() || self.run_queue.has_tasks() {
@@ -697,12 +723,20 @@
 }
 
 impl Shared {
-    pub(super) fn bind_new_task<T>(me: &Arc<Self>, future: T) -> JoinHandle<T::Output>
+    pub(crate) fn as_handle_inner(&self) -> &HandleInner {
+        &self.handle_inner
+    }
+
+    pub(super) fn bind_new_task<T>(
+        me: &Arc<Self>,
+        future: T,
+        id: crate::runtime::task::Id,
+    ) -> JoinHandle<T::Output>
     where
         T: Future + Send + 'static,
         T::Output: Send + 'static,
     {
-        let (handle, notified) = me.owned.bind(future, me.clone());
+        let (handle, notified) = me.owned.bind(future, me.clone(), id);
 
         if let Some(notified) = notified {
             me.schedule(notified, false);
@@ -835,6 +869,19 @@
     }
 }
 
+impl crate::runtime::ToHandle for Arc<Shared> {
+    fn to_handle(&self) -> crate::runtime::Handle {
+        use crate::runtime::thread_pool::Spawner;
+        use crate::runtime::{self, Handle};
+
+        Handle {
+            spawner: runtime::Spawner::ThreadPool(Spawner {
+                shared: self.clone(),
+            }),
+        }
+    }
+}
+
 cfg_metrics! {
     impl Shared {
         pub(super) fn injection_queue_depth(&self) -> usize {
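The fixed GLOBAL_POLL_INTERVAL constant (61) is replaced by two per-core knobs: `global_queue_interval` controls how often the global queue is polled before the local one, and `event_interval` controls how often the worker yields to the driver. A standalone toy sketch of the resulting fairness policy (toy types only, not tokio internals; `event_interval` is elided for brevity):

    use std::collections::VecDeque;

    // Toy core mirroring the worker's interval logic: every
    // `global_queue_interval` ticks the global queue is checked first,
    // which bounds how long injected work can be starved by local tasks.
    struct ToyCore {
        tick: u32,
        global_queue_interval: u32,
        local: VecDeque<&'static str>,
    }

    impl ToyCore {
        fn next_task(&mut self, global: &mut VecDeque<&'static str>) -> Option<&'static str> {
            self.tick = self.tick.wrapping_add(1);
            if self.tick % self.global_queue_interval == 0 {
                global.pop_front().or_else(|| self.local.pop_front())
            } else {
                self.local.pop_front().or_else(|| global.pop_front())
            }
        }
    }

    fn main() {
        let mut core = ToyCore {
            tick: 0,
            global_queue_interval: 61, // tokio's previous hard-coded value
            local: VecDeque::from(vec!["local"]),
        };
        let mut global = VecDeque::from(vec!["global"]);
        // Tick 1 is not a multiple of 61, so local work is preferred.
        assert_eq!(core.next_task(&mut global), Some("local"));
    }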
diff --git a/third_party/rust_crates/vendor/tokio/src/signal/registry.rs b/third_party/rust_crates/vendor/tokio/src/signal/registry.rs
index 6d8eb9e..7795ca8 100644
--- a/third_party/rust_crates/vendor/tokio/src/signal/registry.rs
+++ b/third_party/rust_crates/vendor/tokio/src/signal/registry.rs
@@ -237,7 +237,7 @@
     #[test]
     fn record_invalid_event_does_nothing() {
         let registry = Registry::new(vec![EventInfo::default()]);
-        registry.record_event(42);
+        registry.record_event(1302);
     }
 
     #[test]
diff --git a/third_party/rust_crates/vendor/tokio/src/signal/reusable_box.rs b/third_party/rust_crates/vendor/tokio/src/signal/reusable_box.rs
index 02f3247..796fa21 100644
--- a/third_party/rust_crates/vendor/tokio/src/signal/reusable_box.rs
+++ b/third_party/rust_crates/vendor/tokio/src/signal/reusable_box.rs
@@ -151,7 +151,6 @@
 }
 
 #[cfg(test)]
-#[cfg(not(miri))] // Miri breaks when you use Pin<&mut dyn Future>
 mod test {
     use super::ReusableBoxFuture;
     use futures::future::FutureExt;
diff --git a/third_party/rust_crates/vendor/tokio/src/signal/unix.rs b/third_party/rust_crates/vendor/tokio/src/signal/unix.rs
index 86ea9a9..11f848b 100644
--- a/third_party/rust_crates/vendor/tokio/src/signal/unix.rs
+++ b/third_party/rust_crates/vendor/tokio/src/signal/unix.rs
@@ -22,13 +22,17 @@
 
 pub(crate) type OsStorage = Vec<SignalInfo>;
 
-// Number of different unix signals
-// (FreeBSD has 33)
-const SIGNUM: usize = 33;
-
 impl Init for OsStorage {
     fn init() -> Self {
-        (0..SIGNUM).map(|_| SignalInfo::default()).collect()
+        // There are reliable signals ranging from 1 to 33 available on every Unix platform.
+        #[cfg(not(target_os = "linux"))]
+        let possible = 0..=33;
+
+        // On Linux, there are additional real-time signals available.
+        #[cfg(target_os = "linux")]
+        let possible = 0..=libc::SIGRTMAX();
+
+        possible.map(|_| SignalInfo::default()).collect()
     }
 }
 
@@ -60,7 +64,7 @@
 }
 
 /// Represents the specific kind of signal to listen for.
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
 pub struct SignalKind(libc::c_int);
 
 impl SignalKind {
@@ -84,6 +88,17 @@
         Self(signum as libc::c_int)
     }
 
+    /// Get the signal's numeric value.
+    ///
+    /// ```rust
+    /// # use tokio::signal::unix::SignalKind;
+    /// let kind = SignalKind::interrupt();
+    /// assert_eq!(kind.as_raw_value(), libc::SIGINT);
+    /// ```
+    pub fn as_raw_value(&self) -> std::os::raw::c_int {
+        self.0
+    }
+
     /// Represents the SIGALRM signal.
     ///
     /// On Unix systems this signal is sent when a real-time timer has expired.
@@ -190,6 +205,18 @@
     }
 }
 
+impl From<std::os::raw::c_int> for SignalKind {
+    fn from(signum: std::os::raw::c_int) -> Self {
+        Self::from_raw(signum as libc::c_int)
+    }
+}
+
+impl From<SignalKind> for std::os::raw::c_int {
+    fn from(kind: SignalKind) -> Self {
+        kind.as_raw_value()
+    }
+}
+
 pub(crate) struct SignalInfo {
     event_info: EventInfo,
     init: Once,
@@ -380,6 +407,12 @@
     ///
     /// `None` is returned if no more events can be received by this stream.
     ///
+    /// # Cancel safety
+    ///
+    /// This method is cancel safe. If you use it as the event in a
+    /// [`tokio::select!`](crate::select) statement and some other branch
+    /// completes first, then it is guaranteed that no signal is lost.
+    ///
     /// # Examples
     ///
     /// Wait for SIGHUP
@@ -474,4 +507,15 @@
         )
         .unwrap_err();
     }
+
+    #[test]
+    fn from_c_int() {
+        assert_eq!(SignalKind::from(2), SignalKind::interrupt());
+    }
+
+    #[test]
+    fn into_c_int() {
+        let value: std::os::raw::c_int = SignalKind::interrupt().into();
+        assert_eq!(value, libc::SIGINT as _);
+    }
 }
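The new conversions for `SignalKind` compose with the existing constructors; a small sketch, assuming the `libc` crate is available as in the doc example above:

    use tokio::signal::unix::SignalKind;

    fn main() {
        // Round-trip a raw signal number through the new From impls.
        let kind: SignalKind = libc::SIGTERM.into();
        let raw: std::os::raw::c_int = kind.into();
        assert_eq!(raw, libc::SIGTERM);

        // `as_raw_value` reads the number without consuming the kind, and
        // the new Hash/PartialEq/Eq derives allow direct comparison.
        assert_eq!(kind.as_raw_value(), libc::SIGTERM);
        assert_eq!(kind, SignalKind::terminate());
    }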
diff --git a/third_party/rust_crates/vendor/tokio/src/sync/barrier.rs b/third_party/rust_crates/vendor/tokio/src/sync/barrier.rs
index dfc76a4..b2f24bb 100644
--- a/third_party/rust_crates/vendor/tokio/src/sync/barrier.rs
+++ b/third_party/rust_crates/vendor/tokio/src/sync/barrier.rs
@@ -105,7 +105,7 @@
             n,
             wait,
             #[cfg(all(tokio_unstable, feature = "tracing"))]
-            resource_span: resource_span,
+            resource_span,
         }
     }
 
diff --git a/third_party/rust_crates/vendor/tokio/src/sync/batch_semaphore.rs b/third_party/rust_crates/vendor/tokio/src/sync/batch_semaphore.rs
index 4f5efff..4db8835 100644
--- a/third_party/rust_crates/vendor/tokio/src/sync/batch_semaphore.rs
+++ b/third_party/rust_crates/vendor/tokio/src/sync/batch_semaphore.rs
@@ -582,7 +582,7 @@
 
                 tracing::trace!(
                     target: "runtime::resource::async_op::state_update",
-                    permits_obtained = 0 as usize,
+                    permits_obtained = 0usize,
                     permits.op = "override",
                 );
 
diff --git a/third_party/rust_crates/vendor/tokio/src/sync/broadcast.rs b/third_party/rust_crates/vendor/tokio/src/sync/broadcast.rs
index 0d9cd3b..c796d12 100644
--- a/third_party/rust_crates/vendor/tokio/src/sync/broadcast.rs
+++ b/third_party/rust_crates/vendor/tokio/src/sync/broadcast.rs
@@ -230,7 +230,7 @@
     ///
     /// [`recv`]: crate::sync::broadcast::Receiver::recv
     /// [`Receiver`]: crate::sync::broadcast::Receiver
-    #[derive(Debug, PartialEq)]
+    #[derive(Debug, PartialEq, Clone)]
     pub enum RecvError {
         /// There are no more active senders implying no further messages will ever
         /// be sent.
@@ -258,7 +258,7 @@
     ///
     /// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv
     /// [`Receiver`]: crate::sync::broadcast::Receiver
-    #[derive(Debug, PartialEq)]
+    #[derive(Debug, PartialEq, Clone)]
     pub enum TryRecvError {
         /// The channel is currently empty. There are still active
         /// [`Sender`] handles, so data may yet become available.
@@ -425,6 +425,11 @@
 ///     tx.send(20).unwrap();
 /// }
 /// ```
+///
+/// # Panics
+///
+/// This will panic if `capacity` is equal to `0` or larger
+/// than `usize::MAX / 2`.
 pub fn channel<T: Clone>(mut capacity: usize) -> (Sender<T>, Receiver<T>) {
     assert!(capacity > 0, "capacity is empty");
     assert!(capacity <= usize::MAX >> 1, "requested capacity too large");
@@ -642,6 +647,7 @@
     }
 }
 
+/// Creates a new `Receiver` which reads starting from the tail of the channel.
 fn new_receiver<T>(shared: Arc<Shared<T>>) -> Receiver<T> {
     let mut tail = shared.tail.lock();
 
@@ -691,6 +697,73 @@
 }
 
 impl<T> Receiver<T> {
+    /// Returns the number of messages that were sent into the channel and that
+    /// this [`Receiver`] has yet to receive.
+    ///
+    /// If the returned value from `len` is larger than the next largest power of 2
+    /// of the capacity of the channel any call to [`recv`] will return an
+    /// `Err(RecvError::Lagged)` and any call to [`try_recv`] will return an
+    /// `Err(TryRecvError::Lagged)`, e.g. if the capacity of the channel is 10,
+    /// [`recv`] will start to return `Err(RecvError::Lagged)` once `len` returns
+    /// values larger than 16.
+    ///
+    /// [`Receiver`]: crate::sync::broadcast::Receiver
+    /// [`recv`]: crate::sync::broadcast::Receiver::recv
+    /// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::sync::broadcast;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (tx, mut rx1) = broadcast::channel(16);
+    ///
+    ///     tx.send(10).unwrap();
+    ///     tx.send(20).unwrap();
+    ///
+    ///     assert_eq!(rx1.len(), 2);
+    ///     assert_eq!(rx1.recv().await.unwrap(), 10);
+    ///     assert_eq!(rx1.len(), 1);
+    ///     assert_eq!(rx1.recv().await.unwrap(), 20);
+    ///     assert_eq!(rx1.len(), 0);
+    /// }
+    /// ```
+    pub fn len(&self) -> usize {
+        let next_send_pos = self.shared.tail.lock().pos;
+        (next_send_pos - self.next) as usize
+    }
+
+    /// Returns true if there aren't any messages in the channel that the [`Receiver`]
+    /// has yet to receive.
+    ///
+    /// [`Receiver`]: crate::sync::broadcast::Receiver
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::sync::broadcast;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (tx, mut rx1) = broadcast::channel(16);
+    ///
+    ///     assert!(rx1.is_empty());
+    ///
+    ///     tx.send(10).unwrap();
+    ///     tx.send(20).unwrap();
+    ///
+    ///     assert!(!rx1.is_empty());
+    ///     assert_eq!(rx1.recv().await.unwrap(), 10);
+    ///     assert_eq!(rx1.recv().await.unwrap(), 20);
+    ///     assert!(rx1.is_empty());
+    /// }
+    /// ```
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
     /// Locks the next value if there is one.
     fn recv_ref(
         &mut self,
@@ -809,6 +882,33 @@
 }
 
 impl<T: Clone> Receiver<T> {
+    /// Re-subscribes to the channel starting from the current tail element.
+    ///
+    /// This [`Receiver`] handle will receive a clone of all values sent
+    /// **after** it has resubscribed. This will not include elements that are
+    /// in the queue of the current receiver. Consider the following example.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::sync::broadcast;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///   let (tx, mut rx) = broadcast::channel(2);
+    ///
+    ///   tx.send(1).unwrap();
+    ///   let mut rx2 = rx.resubscribe();
+    ///   tx.send(2).unwrap();
+    ///
+    ///   assert_eq!(rx2.recv().await.unwrap(), 2);
+    ///   assert_eq!(rx.recv().await.unwrap(), 1);
+    /// }
+    /// ```
+    pub fn resubscribe(&self) -> Self {
+        let shared = self.shared.clone();
+        new_receiver(shared)
+    }
     /// Receives the next value for this receiver.
     ///
     /// Each [`Receiver`] handle will receive a clone of all values sent
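Together, `len`, `is_empty`, and `resubscribe` make receiver lag observable. A sketch of the documented behaviour (assuming the usual broadcast semantics, where a receiver that falls behind gets `RecvError::Lagged` carrying the number of skipped messages):

    use tokio::sync::broadcast::{self, error::RecvError};

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = broadcast::channel(2);

        // Overfill the channel: with capacity 2, four sends leave the
        // receiver two messages behind the oldest retained value.
        for i in 0..4 {
            tx.send(i).unwrap();
        }
        assert_eq!(rx.len(), 4);

        // The first recv reports how many messages were skipped.
        assert!(matches!(rx.recv().await, Err(RecvError::Lagged(2))));

        // A resubscribed handle starts at the tail, with nothing pending.
        let rx2 = rx.resubscribe();
        assert!(rx2.is_empty());
    }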
diff --git a/third_party/rust_crates/vendor/tokio/src/sync/mpsc/error.rs b/third_party/rust_crates/vendor/tokio/src/sync/mpsc/error.rs
index 3fe6bac..1c789da 100644
--- a/third_party/rust_crates/vendor/tokio/src/sync/mpsc/error.rs
+++ b/third_party/rust_crates/vendor/tokio/src/sync/mpsc/error.rs
@@ -78,7 +78,7 @@
 // ===== RecvError =====
 
 /// Error returned by `Receiver`.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 #[doc(hidden)]
 #[deprecated(note = "This type is unused because recv returns an Option.")]
 pub struct RecvError(());
diff --git a/third_party/rust_crates/vendor/tokio/src/sync/mpsc/unbounded.rs b/third_party/rust_crates/vendor/tokio/src/sync/mpsc/unbounded.rs
index b133f9f..f8338fb0 100644
--- a/third_party/rust_crates/vendor/tokio/src/sync/mpsc/unbounded.rs
+++ b/third_party/rust_crates/vendor/tokio/src/sync/mpsc/unbounded.rs
@@ -79,8 +79,14 @@
 
     /// Receives the next value for this receiver.
     ///
-    /// `None` is returned when all `Sender` halves have dropped, indicating
-    /// that no further values can be sent on the channel.
+    /// This method returns `None` if the channel has been closed and there are
+    /// no remaining messages in the channel's buffer. This indicates that no
+    /// further values can ever be received from this `Receiver`. The channel is
+    /// closed when all senders have been dropped, or when [`close`] is called.
+    ///
+    /// If there are no messages in the channel's buffer, but the channel has
+    /// not yet been closed, this method will sleep until a message is sent or
+    /// the channel is closed.
     ///
     /// # Cancel safety
     ///
@@ -89,6 +95,8 @@
     /// completes first, it is guaranteed that no messages were received on this
     /// channel.
     ///
+    /// [`close`]: Self::close
+    ///
     /// # Examples
     ///
     /// ```
@@ -207,6 +215,9 @@
     ///
     /// This prevents any further messages from being sent on the channel while
     /// still enabling the receiver to drain messages that are buffered.
+    ///
+    /// To guarantee that no messages are dropped, after calling `close()`,
+    /// `recv()` must be called until `None` is returned.
     pub fn close(&mut self) {
         self.chan.close();
     }
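The expanded `close` documentation describes a drain pattern; a minimal sketch:

    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::unbounded_channel();
        tx.send(1).unwrap();
        tx.send(2).unwrap();

        // Close the channel, then drain: `recv` keeps yielding buffered
        // messages and only returns `None` once the buffer is empty.
        rx.close();
        assert!(tx.send(3).is_err()); // no new messages are accepted

        let mut drained = Vec::new();
        while let Some(v) = rx.recv().await {
            drained.push(v);
        }
        assert_eq!(drained, vec![1, 2]);
    }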
diff --git a/third_party/rust_crates/vendor/tokio/src/sync/notify.rs b/third_party/rust_crates/vendor/tokio/src/sync/notify.rs
index 83d0de4..2af9baca 100644
--- a/third_party/rust_crates/vendor/tokio/src/sync/notify.rs
+++ b/third_party/rust_crates/vendor/tokio/src/sync/notify.rs
@@ -26,22 +26,23 @@
 /// `Notify` itself does not carry any data. Instead, it is to be used to signal
 /// another task to perform an operation.
 ///
-/// `Notify` can be thought of as a [`Semaphore`] starting with 0 permits.
-/// [`notified().await`] waits for a permit to become available, and [`notify_one()`]
-/// sets a permit **if there currently are no available permits**.
+/// A `Notify` can be thought of as a [`Semaphore`] starting with 0 permits. The
+/// [`notified().await`] method waits for a permit to become available, and
+/// [`notify_one()`] sets a permit **if there currently are no available
+/// permits**.
 ///
 /// The synchronization details of `Notify` are similar to
 /// [`thread::park`][park] and [`Thread::unpark`][unpark] from std. A [`Notify`]
 /// value contains a single permit. [`notified().await`] waits for the permit to
-/// be made available, consumes the permit, and resumes.  [`notify_one()`] sets the
-/// permit, waking a pending task if there is one.
+/// be made available, consumes the permit, and resumes.  [`notify_one()`] sets
+/// the permit, waking a pending task if there is one.
 ///
-/// If `notify_one()` is called **before** `notified().await`, then the next call to
-/// `notified().await` will complete immediately, consuming the permit. Any
-/// subsequent calls to `notified().await` will wait for a new permit.
+/// If `notify_one()` is called **before** `notified().await`, then the next
+/// call to `notified().await` will complete immediately, consuming the permit.
+/// Any subsequent calls to `notified().await` will wait for a new permit.
 ///
-/// If `notify_one()` is called **multiple** times before `notified().await`, only a
-/// **single** permit is stored. The next call to `notified().await` will
+/// If `notify_one()` is called **multiple** times before `notified().await`,
+/// only a **single** permit is stored. The next call to `notified().await` will
 /// complete immediately, but the one after will wait for a new permit.
 ///
 /// # Examples
@@ -70,7 +71,11 @@
 /// }
 /// ```
 ///
-/// Unbound mpsc channel.
+/// Unbound multi-producer single-consumer (mpsc) channel.
+///
+/// No wakeups can be lost when using this channel because the call to
+/// `notify_one()` will store a permit in the `Notify`, which the following call
+/// to `notified()` will consume.
 ///
 /// ```
 /// use tokio::sync::Notify;
@@ -92,6 +97,8 @@
 ///         self.notify.notify_one();
 ///     }
 ///
+///     // This is a single-consumer channel, so several concurrent calls to
+///     // `recv` are not allowed.
 ///     pub async fn recv(&self) -> T {
 ///         loop {
 ///             // Drain values
@@ -106,10 +113,87 @@
 /// }
 /// ```
 ///
+/// Unbound multi-producer multi-consumer (mpmc) channel.
+///
+/// The call to [`enable`] is important because otherwise if you have two
+/// calls to `recv` and two calls to `send` in parallel, the following could
+/// happen:
+///
+///  1. Both calls to `try_recv` return `None`.
+///  2. Both new elements are added to the vector.
+///  3. The `notify_one` method is called twice, adding only a single
+///     permit to the `Notify`.
+///  4. Both calls to `recv` reach the `Notified` future. One of them
+///     consumes the permit, and the other sleeps forever.
+///
+/// By adding the `Notified` futures to the list by calling `enable` before
+/// `try_recv`, the `notify_one` calls in step three would remove the
+/// futures from the list and mark them notified instead of adding a permit
+/// to the `Notify`. This ensures that both futures are woken.
+///
+/// Notice that this failure can only happen if there are two concurrent calls
+/// to `recv`. This is why the mpsc example above does not require a call to
+/// `enable`.
+///
+/// ```
+/// use tokio::sync::Notify;
+///
+/// use std::collections::VecDeque;
+/// use std::sync::Mutex;
+///
+/// struct Channel<T> {
+///     messages: Mutex<VecDeque<T>>,
+///     notify_on_sent: Notify,
+/// }
+///
+/// impl<T> Channel<T> {
+///     pub fn send(&self, msg: T) {
+///         let mut locked_queue = self.messages.lock().unwrap();
+///         locked_queue.push_back(msg);
+///         drop(locked_queue);
+///
+///         // Send a notification to one of the calls currently
+///         // waiting in a call to `recv`.
+///         self.notify_on_sent.notify_one();
+///     }
+///
+///     pub fn try_recv(&self) -> Option<T> {
+///         let mut locked_queue = self.messages.lock().unwrap();
+///         locked_queue.pop_front()
+///     }
+///
+///     pub async fn recv(&self) -> T {
+///         let future = self.notify_on_sent.notified();
+///         tokio::pin!(future);
+///
+///         loop {
+///             // Make sure that no wakeup is lost if we get
+///             // `None` from `try_recv`.
+///             future.as_mut().enable();
+///
+///             if let Some(msg) = self.try_recv() {
+///                 return msg;
+///             }
+///
+///             // Wait for a call to `notify_one`.
+///             //
+///             // This uses `.as_mut()` to avoid consuming the future,
+///             // which lets us call `Pin::set` below.
+///             future.as_mut().await;
+///
+///             // Reset the future in case another call to
+///             // `try_recv` got the message before us.
+///             future.set(self.notify_on_sent.notified());
+///         }
+///     }
+/// }
+/// ```
+///
 /// [park]: std::thread::park
 /// [unpark]: std::thread::Thread::unpark
 /// [`notified().await`]: Notify::notified()
 /// [`notify_one()`]: Notify::notify_one()
+/// [`enable`]: Notified::enable()
 /// [`Semaphore`]: crate::sync::Semaphore
 #[derive(Debug)]
 pub struct Notify {
@@ -145,7 +229,10 @@
     _p: PhantomPinned,
 }
 
-/// Future returned from [`Notify::notified()`]
+/// Future returned from [`Notify::notified()`].
+///
+/// This future is fused, so once it has completed, any future calls to poll
+/// will immediately return `Poll::Ready`.
 #[derive(Debug)]
 pub struct Notified<'a> {
     /// The `Notify` being received on.
@@ -249,7 +336,16 @@
     /// immediately, consuming that permit. Otherwise, `notified().await` waits
     /// for a permit to be made available by the next call to `notify_one()`.
     ///
+    /// The `Notified` future is not guaranteed to receive wakeups from calls to
+    /// `notify_one()` if it has not yet been polled. See the documentation for
+    /// [`Notified::enable()`] for more details.
+    ///
+    /// The `Notified` future is guaranteed to receive wakeups from
+    /// `notify_waiters()` as soon as it has been created, even if it has not
+    /// yet been polled.
+    ///
     /// [`notify_one()`]: Notify::notify_one
+    /// [`Notified::enable()`]: Notified::enable
     ///
     /// # Cancel safety
     ///
@@ -405,7 +501,7 @@
         // transition out of WAITING while the lock is held.
         let curr = self.state.load(SeqCst);
 
-        if let EMPTY | NOTIFIED = get_state(curr) {
+        if matches!(get_state(curr), EMPTY | NOTIFIED) {
             // There are no waiting tasks. All we need to do is increment the
             // number of times this method was called.
             atomic_inc_num_notify_waiters_calls(&self.state);
@@ -513,6 +609,114 @@
 // ===== impl Notified =====
 
 impl Notified<'_> {
+    /// Adds this future to the list of futures that are ready to receive
+    /// wakeups from calls to [`notify_one`].
+    ///
+    /// Polling the future also adds it to the list, so this method should only
+    /// be used if you want to add the future to the list before the first call
+    /// to `poll`. (In fact, this method is equivalent to calling `poll` except
+    /// that no `Waker` is registered.)
+    ///
+    /// This has no effect on notifications sent using [`notify_waiters`], which
+    /// are received as long as they happen after the creation of the `Notified`
+    /// regardless of whether `enable` or `poll` has been called.
+    ///
+    /// This method returns true if the `Notified` is ready. This happens in the
+    /// following situations:
+    ///
+    ///  1. The `notify_waiters` method was called between the creation of the
+    ///     `Notified` and the call to this method.
+    ///  2. This is the first call to `enable` or `poll` on this future, and the
+    ///     `Notify` was holding a permit from a previous call to `notify_one`.
+    ///     The call consumes the permit in that case.
+    ///  3. The future has previously been enabled or polled, and it has since
+    ///     then been marked ready by either consuming a permit from the
+    ///     `Notify`, or by a call to `notify_one` or `notify_waiters` that
+    ///     removed it from the list of futures ready to receive wakeups.
+    ///
+    /// If this method returns true, any future calls to poll on the same future
+    /// will immediately return `Poll::Ready`.
+    ///
+    /// # Examples
+    ///
+    /// Unbound multi-producer multi-consumer (mpmc) channel.
+    ///
+    /// The call to `enable` is important because otherwise if you have two
+    /// calls to `recv` and two calls to `send` in parallel, the following could
+    /// happen:
+    ///
+    ///  1. Both calls to `try_recv` return `None`.
+    ///  2. Both new elements are added to the vector.
+    ///  3. The `notify_one` method is called twice, adding only a single
+    ///     permit to the `Notify`.
+    ///  4. Both calls to `recv` reach the `Notified` future. One of them
+    ///     consumes the permit, and the other sleeps forever.
+    ///
+    /// By adding the `Notified` futures to the list by calling `enable` before
+    /// `try_recv`, the `notify_one` calls in step three would remove the
+    /// futures from the list and mark them notified instead of adding a permit
+    /// to the `Notify`. This ensures that both futures are woken.
+    ///
+    /// ```
+    /// use tokio::sync::Notify;
+    ///
+    /// use std::collections::VecDeque;
+    /// use std::sync::Mutex;
+    ///
+    /// struct Channel<T> {
+    ///     messages: Mutex<VecDeque<T>>,
+    ///     notify_on_sent: Notify,
+    /// }
+    ///
+    /// impl<T> Channel<T> {
+    ///     pub fn send(&self, msg: T) {
+    ///         let mut locked_queue = self.messages.lock().unwrap();
+    ///         locked_queue.push_back(msg);
+    ///         drop(locked_queue);
+    ///
+    ///         // Send a notification to one of the calls currently
+    ///         // waiting in a call to `recv`.
+    ///         self.notify_on_sent.notify_one();
+    ///     }
+    ///
+    ///     pub fn try_recv(&self) -> Option<T> {
+    ///         let mut locked_queue = self.messages.lock().unwrap();
+    ///         locked_queue.pop_front()
+    ///     }
+    ///
+    ///     pub async fn recv(&self) -> T {
+    ///         let future = self.notify_on_sent.notified();
+    ///         tokio::pin!(future);
+    ///
+    ///         loop {
+    ///             // Make sure that no wakeup is lost if we get
+    ///             // `None` from `try_recv`.
+    ///             future.as_mut().enable();
+    ///
+    ///             if let Some(msg) = self.try_recv() {
+    ///                 return msg;
+    ///             }
+    ///
+    ///             // Wait for a call to `notify_one`.
+    ///             //
+    ///             // This uses `.as_mut()` to avoid consuming the future,
+    ///             // which lets us call `Pin::set` below.
+    ///             future.as_mut().await;
+    ///
+    ///             // Reset the future in case another call to
+    ///             // `try_recv` got the message before us.
+    ///             future.set(self.notify_on_sent.notified());
+    ///         }
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// [`notify_one`]: Notify::notify_one()
+    /// [`notify_waiters`]: Notify::notify_waiters()
+    pub fn enable(self: Pin<&mut Self>) -> bool {
+        self.poll_notified(None).is_ready()
+    }
+
     /// A custom `project` implementation is used in place of `pin-project-lite`
     /// as a custom drop implementation is needed.
     fn project(self: Pin<&mut Self>) -> (&Notify, &mut State, &UnsafeCell<Waiter>) {
@@ -526,12 +730,8 @@
             (me.notify, &mut me.state, &me.waiter)
         }
     }
-}
 
-impl Future for Notified<'_> {
-    type Output = ();
-
-    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+    fn poll_notified(self: Pin<&mut Self>, waker: Option<&Waker>) -> Poll<()> {
         use State::*;
 
         let (notify, state, waiter) = self.project();
@@ -557,7 +757,7 @@
 
                     // Clone the waker before locking, a waker clone can be
                     // triggering arbitrary code.
-                    let waker = cx.waker().clone();
+                    let waker = waker.cloned();
 
                     // Acquire the lock and attempt to transition to the waiting
                     // state.
@@ -618,9 +818,11 @@
                         }
                     }
 
-                    // Safety: called while locked.
-                    unsafe {
-                        (*waiter.get()).waker = Some(waker);
+                    if waker.is_some() {
+                        // Safety: called while locked.
+                        unsafe {
+                            (*waiter.get()).waker = waker;
+                        }
                     }
 
                     // Insert the waiter into the linked list
@@ -652,8 +854,14 @@
                         *state = Done;
                     } else {
                         // Update the waker, if necessary.
-                        if !w.waker.as_ref().unwrap().will_wake(cx.waker()) {
-                            w.waker = Some(cx.waker().clone());
+                        if let Some(waker) = waker {
+                            let should_update = match w.waker.as_ref() {
+                                Some(current_waker) => !current_waker.will_wake(waker),
+                                None => true,
+                            };
+                            if should_update {
+                                w.waker = Some(waker.clone());
+                            }
                         }
 
                         return Poll::Pending;
@@ -674,6 +882,14 @@
     }
 }
 
+impl Future for Notified<'_> {
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+        self.poll_notified(Some(cx.waker()))
+    }
+}
+
 impl Drop for Notified<'_> {
     fn drop(&mut self) {
         use State::*;
@@ -684,7 +900,7 @@
         // This is where we ensure safety. The `Notified` value is being
         // dropped, which means we must ensure that the waiter entry is no
         // longer stored in the linked list.
-        if let Waiting = *state {
+        if matches!(*state, Waiting) {
             let mut waiters = notify.waiters.lock();
             let mut notify_state = notify.state.load(SeqCst);
 
@@ -694,11 +910,9 @@
             // being the only `LinkedList` available to the type.
             unsafe { waiters.remove(NonNull::new_unchecked(waiter.get())) };
 
-            if waiters.is_empty() {
-                if let WAITING = get_state(notify_state) {
-                    notify_state = set_state(notify_state, EMPTY);
-                    notify.state.store(notify_state, SeqCst);
-                }
+            if waiters.is_empty() && get_state(notify_state) == WAITING {
+                notify_state = set_state(notify_state, EMPTY);
+                notify.state.store(notify_state, SeqCst);
             }
 
             // See if the node was notified but not received. In this case, if
@@ -707,7 +921,10 @@
             //
             // Safety: with the entry removed from the linked list, there can be
             // no concurrent access to the entry
-            if let Some(NotificationType::OneWaiter) = unsafe { (*waiter.get()).notified } {
+            if matches!(
+                unsafe { (*waiter.get()).notified },
+                Some(NotificationType::OneWaiter)
+            ) {
                 if let Some(waker) = notify_locked(&mut waiters, &notify.state, notify_state) {
                     drop(waiters);
                     waker.wake();
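A consequence of splitting `poll` into `poll_notified` is that a permit stored by an earlier `notify_one()` can be consumed at `enable` time rather than at first poll; a small sketch:

    use tokio::sync::Notify;

    #[tokio::main]
    async fn main() {
        let notify = Notify::new();
        notify.notify_one(); // stores the single permit

        let fut = notify.notified();
        tokio::pin!(fut);

        // `enable` consumes the stored permit and reports readiness.
        assert!(fut.as_mut().enable());

        // The future is fused: awaiting it now completes immediately.
        fut.await;
    }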
diff --git a/third_party/rust_crates/vendor/tokio/src/sync/oneshot.rs b/third_party/rust_crates/vendor/tokio/src/sync/oneshot.rs
index 2240074..d5fc811 100644
--- a/third_party/rust_crates/vendor/tokio/src/sync/oneshot.rs
+++ b/third_party/rust_crates/vendor/tokio/src/sync/oneshot.rs
@@ -323,11 +323,11 @@
     use std::fmt;
 
     /// Error returned by the `Future` implementation for `Receiver`.
-    #[derive(Debug, Eq, PartialEq)]
+    #[derive(Debug, Eq, PartialEq, Clone)]
     pub struct RecvError(pub(super) ());
 
     /// Error returned by the `try_recv` function on `Receiver`.
-    #[derive(Debug, Eq, PartialEq)]
+    #[derive(Debug, Eq, PartialEq, Clone)]
     pub enum TryRecvError {
         /// The send half of the channel has not yet sent a value.
         Empty,
@@ -526,7 +526,7 @@
     let rx = Receiver {
         inner: Some(inner),
         #[cfg(all(tokio_unstable, feature = "tracing"))]
-        resource_span: resource_span,
+        resource_span,
         #[cfg(all(tokio_unstable, feature = "tracing"))]
         async_op_span,
         #[cfg(all(tokio_unstable, feature = "tracing"))]
diff --git a/third_party/rust_crates/vendor/tokio/src/sync/watch.rs b/third_party/rust_crates/vendor/tokio/src/sync/watch.rs
index 5673e0f..184ba4b 100644
--- a/third_party/rust_crates/vendor/tokio/src/sync/watch.rs
+++ b/third_party/rust_crates/vendor/tokio/src/sync/watch.rs
@@ -60,6 +60,7 @@
 use crate::loom::sync::{Arc, RwLock, RwLockReadGuard};
 use std::mem;
 use std::ops;
+use std::panic;
 
 /// Receives values from the associated [`Sender`](struct@Sender).
 ///
@@ -154,7 +155,7 @@
     impl<T: fmt::Debug> std::error::Error for SendError<T> {}
 
     /// Error produced when receiving a change notification.
-    #[derive(Debug)]
+    #[derive(Debug, Clone)]
     pub struct RecvError(pub(super) ());
 
     // ===== impl RecvError =====
@@ -466,6 +467,22 @@
         }
     }
 
+    /// Returns `true` if receivers belong to the same channel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let (tx, rx) = tokio::sync::watch::channel(true);
+    /// let rx2 = rx.clone();
+    /// assert!(rx.same_channel(&rx2));
+    ///
+    /// let (tx3, rx3) = tokio::sync::watch::channel(true);
+    /// assert!(!rx3.same_channel(&rx2));
+    /// ```
+    pub fn same_channel(&self, other: &Self) -> bool {
+        Arc::ptr_eq(&self.shared, &other.shared)
+    }
+
     cfg_process_driver! {
         pub(crate) fn try_has_changed(&mut self) -> Option<Result<(), error::RecvError>> {
             maybe_changed(&self.shared, &mut self.version)
@@ -530,6 +547,145 @@
         Ok(())
     }
 
+    /// Modifies the watched value **unconditionally** in-place,
+    /// notifying all receivers.
+    ///
+    /// This can be useful for modifying the watched value without
+    /// having to allocate a new instance. Additionally, this
+    /// method permits sending values even when there are no receivers.
+    ///
+    /// Prefer to use the more versatile function [`Self::send_if_modified()`]
+    /// if the value is only modified conditionally during the mutable borrow
+    /// to prevent unneeded change notifications for unmodified values.
+    ///
+    /// # Panics
+    ///
+    /// This function panics when the invocation of the `modify` closure panics.
+    /// No receivers are notified when panicking. All changes of the watched
+    /// value applied by the closure before panicking will be visible in
+    /// subsequent calls to `borrow`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::sync::watch;
+    ///
+    /// struct State {
+    ///     counter: usize,
+    /// }
+    /// let (state_tx, state_rx) = watch::channel(State { counter: 0 });
+    /// state_tx.send_modify(|state| state.counter += 1);
+    /// assert_eq!(state_rx.borrow().counter, 1);
+    /// ```
+    pub fn send_modify<F>(&self, modify: F)
+    where
+        F: FnOnce(&mut T),
+    {
+        self.send_if_modified(|value| {
+            modify(value);
+            true
+        });
+    }
+
+    /// Modifies the watched value **conditionally** in-place,
+    /// notifying all receivers only if modified.
+    ///
+    /// This can be useful for modifying the watched value without
+    /// having to allocate a new instance. Additionally, this
+    /// method permits sending values even when there are no receivers.
+    ///
+    /// The `modify` closure must return `true` if the value has actually
+    /// been modified during the mutable borrow. It should only return `false`
+    /// if the value is guaranteed to be unmodified despite the mutable
+    /// borrow.
+    ///
+    /// Receivers are only notified if the closure returned `true`. If the
+    /// closure has modified the value but returned `false`, this results
+    /// in a *silent modification*, i.e. the modified value will be visible
+    /// in subsequent calls to `borrow`, but receivers will not receive
+    /// a change notification.
+    ///
+    /// Returns the result of the closure, i.e. `true` if the value has
+    /// been modified and `false` otherwise.
+    ///
+    /// # Panics
+    ///
+    /// This function panics when the invocation of the `modify` closure panics.
+    /// No receivers are notified when panicking. All changes of the watched
+    /// value applied by the closure before panicking will be visible in
+    /// subsequent calls to `borrow`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::sync::watch;
+    ///
+    /// struct State {
+    ///     counter: usize,
+    /// }
+    /// let (state_tx, mut state_rx) = watch::channel(State { counter: 1 });
+    /// let inc_counter_if_odd = |state: &mut State| {
+    ///     if state.counter % 2 == 1 {
+    ///         state.counter += 1;
+    ///         return true;
+    ///     }
+    ///     false
+    /// };
+    ///
+    /// assert_eq!(state_rx.borrow().counter, 1);
+    ///
+    /// assert!(!state_rx.has_changed().unwrap());
+    /// assert!(state_tx.send_if_modified(inc_counter_if_odd));
+    /// assert!(state_rx.has_changed().unwrap());
+    /// assert_eq!(state_rx.borrow_and_update().counter, 2);
+    ///
+    /// assert!(!state_rx.has_changed().unwrap());
+    /// assert!(!state_tx.send_if_modified(inc_counter_if_odd));
+    /// assert!(!state_rx.has_changed().unwrap());
+    /// assert_eq!(state_rx.borrow_and_update().counter, 2);
+    /// ```
+    pub fn send_if_modified<F>(&self, modify: F) -> bool
+    where
+        F: FnOnce(&mut T) -> bool,
+    {
+        {
+            // Acquire the write lock and update the value.
+            let mut lock = self.shared.value.write().unwrap();
+
+            // Update the value and catch possible panic inside func.
+            let result = panic::catch_unwind(panic::AssertUnwindSafe(|| modify(&mut lock)));
+            match result {
+                Ok(modified) => {
+                    if !modified {
+                        // Abort, i.e. don't notify receivers if unmodified
+                        return false;
+                    }
+                    // Continue if modified
+                }
+                Err(panicked) => {
+                    // Drop the lock to avoid poisoning it.
+                    drop(lock);
+                    // Forward the panic to the caller.
+                    panic::resume_unwind(panicked);
+                    // Unreachable
+                }
+            };
+
+            self.shared.state.increment_version();
+
+            // Release the write lock.
+            //
+            // Incrementing the version counter while holding the lock ensures
+            // that receivers are able to figure out the version number of the
+            // value they are currently looking at.
+            drop(lock);
+        }
+
+        self.shared.notify_rx.notify_waiters();
+
+        true
+    }
+
     /// Sends a new value via the channel, notifying all receivers and returning
     /// the previous value in the channel.
     ///
@@ -546,28 +702,11 @@
     /// assert_eq!(tx.send_replace(2), 1);
     /// assert_eq!(tx.send_replace(3), 2);
     /// ```
-    pub fn send_replace(&self, value: T) -> T {
-        let old = {
-            // Acquire the write lock and update the value.
-            let mut lock = self.shared.value.write().unwrap();
-            let old = mem::replace(&mut *lock, value);
+    pub fn send_replace(&self, mut value: T) -> T {
+        // swap old watched value with the new one
+        self.send_modify(|old| mem::swap(old, &mut value));
 
-            self.shared.state.increment_version();
-
-            // Release the write lock.
-            //
-            // Incrementing the version counter while holding the lock ensures
-            // that receivers are able to figure out the version number of the
-            // value they are currently looking at.
-            drop(lock);
-
-            old
-        };
-
-        // Notify all watchers
-        self.shared.notify_rx.notify_waiters();
-
-        old
+        value
     }
 
     /// Returns a reference to the most recently sent value
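The Panics contract of `send_modify` and `send_if_modified` (partial changes remain visible, but no notification is sent) can be observed directly; a sketch, assuming `Receiver::has_changed` from earlier tokio releases:

    use std::panic::{catch_unwind, AssertUnwindSafe};
    use tokio::sync::watch;

    fn main() {
        let (tx, rx) = watch::channel(0u32);

        // The closure panics after mutating; the panic is forwarded to us.
        let result = catch_unwind(AssertUnwindSafe(|| {
            tx.send_modify(|v| {
                *v = 42;
                panic!("boom");
            })
        }));
        assert!(result.is_err());

        // The partial modification is visible, but receivers were not
        // notified and the lock was not poisoned.
        assert_eq!(*rx.borrow(), 42);
        assert!(!rx.has_changed().unwrap());
    }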
diff --git a/third_party/rust_crates/vendor/tokio/src/task/builder.rs b/third_party/rust_crates/vendor/tokio/src/task/builder.rs
index 2086302..ddb5c43 100644
--- a/third_party/rust_crates/vendor/tokio/src/task/builder.rs
+++ b/third_party/rust_crates/vendor/tokio/src/task/builder.rs
@@ -1,5 +1,8 @@
 #![allow(unreachable_pub)]
-use crate::{runtime::context, task::JoinHandle};
+use crate::{
+    runtime::{context, Handle},
+    task::{JoinHandle, LocalSet},
+};
 use std::future::Future;
 
 /// Factory which is used to configure the properties of a new task.
@@ -71,7 +74,11 @@
         Self { name: Some(name) }
     }
 
-    /// Spawns a task on the executor.
+    /// Spawns a task with this builder's settings on the current runtime.
+    ///
+    /// # Panics
+    ///
+    /// This method panics if called outside of a Tokio runtime.
     ///
     /// See [`task::spawn`](crate::task::spawn) for
     /// more details.
@@ -84,10 +91,36 @@
         super::spawn::spawn_inner(future, self.name)
     }
 
-    /// Spawns a task on the current thread.
+    /// Spawn a task with this builder's settings on the provided [runtime
+    /// handle].
     ///
-    /// See [`task::spawn_local`](crate::task::spawn_local)
-    /// for more details.
+    /// See [`Handle::spawn`] for more details.
+    ///
+    /// [runtime handle]: crate::runtime::Handle
+    /// [`Handle::spawn`]: crate::runtime::Handle::spawn
+    #[track_caller]
+    pub fn spawn_on<Fut>(&mut self, future: Fut, handle: &Handle) -> JoinHandle<Fut::Output>
+    where
+        Fut: Future + Send + 'static,
+        Fut::Output: Send + 'static,
+    {
+        handle.spawn_named(future, self.name)
+    }
+
+    /// Spawns a `!Send` task on the current [`LocalSet`] with this builder's
+    /// settings.
+    ///
+    /// The spawned future will be run on the same thread that called `spawn_local`.
+    /// This may only be called from the context of a [local task set][`LocalSet`].
+    ///
+    /// # Panics
+    ///
+    /// This function panics if called outside of a [local task set][`LocalSet`].
+    ///
+    /// See [`task::spawn_local`] for more details.
+    ///
+    /// [`task::spawn_local`]: crate::task::spawn_local
+    /// [`LocalSet`]: crate::task::LocalSet
     #[track_caller]
     pub fn spawn_local<Fut>(self, future: Fut) -> JoinHandle<Fut::Output>
     where
@@ -97,8 +130,28 @@
         super::local::spawn_local_inner(future, self.name)
     }
 
+    /// Spawns a `!Send` task on the provided [`LocalSet`] with this builder's
+    /// settings.
+    ///
+    /// See [`LocalSet::spawn_local`] for more details.
+    ///
+    /// [`LocalSet::spawn_local`]: crate::task::LocalSet::spawn_local
+    /// [`LocalSet`]: crate::task::LocalSet
+    #[track_caller]
+    pub fn spawn_local_on<Fut>(self, future: Fut, local_set: &LocalSet) -> JoinHandle<Fut::Output>
+    where
+        Fut: Future + 'static,
+        Fut::Output: 'static,
+    {
+        local_set.spawn_named(future, self.name)
+    }
+
     /// Spawns blocking code on the blocking threadpool.
     ///
+    /// # Panics
+    ///
+    /// This method panics if called outside of a Tokio runtime.
+    ///
     /// See [`task::spawn_blocking`](crate::task::spawn_blocking)
     /// for more details.
     #[track_caller]
@@ -107,9 +160,32 @@
         Function: FnOnce() -> Output + Send + 'static,
         Output: Send + 'static,
     {
+        self.spawn_blocking_on(function, &context::current())
+    }
+
+    /// Spawns blocking code on the provided [runtime handle]'s blocking threadpool.
+    ///
+    /// See [`Handle::spawn_blocking`] for more details.
+    ///
+    /// [runtime handle]: crate::runtime::Handle
+    /// [`Handle::spawn_blocking`]: crate::runtime::Handle::spawn_blocking
+    #[track_caller]
+    pub fn spawn_blocking_on<Function, Output>(
+        self,
+        function: Function,
+        handle: &Handle,
+    ) -> JoinHandle<Output>
+    where
+        Function: FnOnce() -> Output + Send + 'static,
+        Output: Send + 'static,
+    {
         use crate::runtime::Mandatory;
-        let (join_handle, _was_spawned) =
-            context::current().spawn_blocking_inner(function, Mandatory::NonMandatory, self.name);
+        let (join_handle, _was_spawned) = handle.as_inner().spawn_blocking_inner(
+            function,
+            Mandatory::NonMandatory,
+            self.name,
+            handle,
+        );
         join_handle
     }
 }
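The handle-taking variants let a named task target an explicit runtime; a sketch (`task::Builder` is gated behind `--cfg tokio_unstable` in this release):

    use tokio::runtime::Runtime;
    use tokio::task::Builder;

    fn main() {
        let rt = Runtime::new().unwrap();
        let handle = rt.handle();

        // Spawn a named async task onto an explicit runtime handle.
        let join = Builder::new()
            .name("worker")
            .spawn_on(async { 1 + 1 }, handle);

        // Spawn named blocking work onto the same runtime's blocking pool.
        let blocking = Builder::new()
            .name("blocking-worker")
            .spawn_blocking_on(|| 40 + 2, handle);

        rt.block_on(async {
            assert_eq!(join.await.unwrap(), 2);
            assert_eq!(blocking.await.unwrap(), 42);
        });
    }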
diff --git a/third_party/rust_crates/vendor/tokio/src/task/consume_budget.rs b/third_party/rust_crates/vendor/tokio/src/task/consume_budget.rs
new file mode 100644
index 0000000..c8b2d7e
--- /dev/null
+++ b/third_party/rust_crates/vendor/tokio/src/task/consume_budget.rs
@@ -0,0 +1,45 @@
+use std::task::Poll;
+
+/// Consumes a unit of budget and returns the execution back to the Tokio
+/// runtime *if* the task's coop budget was exhausted.
+///
+/// The task will only yield if its entire coop budget has been exhausted.
+/// This function can be used to insert optional yield points into long
+/// computations that do not use Tokio resources like sockets or semaphores,
+/// without redundantly yielding to the runtime each time.
+///
+/// **Note**: This is an [unstable API][unstable]. The public API of this type
+/// may break in 1.x releases. See [the documentation on unstable
+/// features][unstable] for details.
+///
+/// # Examples
+///
+/// Make sure that a function which returns a sum of (potentially lots of)
+/// iterated values is cooperative.
+///
+/// ```
+/// async fn sum_iterator(input: &mut impl std::iter::Iterator<Item=i64>) -> i64 {
+///     let mut sum: i64 = 0;
+///     while let Some(i) = input.next() {
+///         sum += i;
+///         tokio::task::consume_budget().await
+///     }
+///     sum
+/// }
+/// ```
+/// [unstable]: crate#unstable-features
+#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "rt"))))]
+pub async fn consume_budget() {
+    let mut status = Poll::Pending;
+
+    crate::future::poll_fn(move |cx| {
+        if status.is_ready() {
+            return status;
+        }
+        status = crate::coop::poll_proceed(cx).map(|restore| {
+            restore.made_progress();
+        });
+        status
+    })
+    .await
+}
diff --git a/third_party/rust_crates/vendor/tokio/src/task/join_set.rs b/third_party/rust_crates/vendor/tokio/src/task/join_set.rs
index 8e8f74f..2f93182 100644
--- a/third_party/rust_crates/vendor/tokio/src/task/join_set.rs
+++ b/third_party/rust_crates/vendor/tokio/src/task/join_set.rs
@@ -1,10 +1,16 @@
+//! A collection of tasks spawned on a Tokio runtime.
+//!
+//! This module provides the [`JoinSet`] type, a collection which stores a set
+//! of spawned tasks and allows asynchronously awaiting the output of those
+//! tasks as they complete. See the documentation for the [`JoinSet`] type for
+//! details.
 use std::fmt;
 use std::future::Future;
 use std::pin::Pin;
 use std::task::{Context, Poll};
 
 use crate::runtime::Handle;
-use crate::task::{JoinError, JoinHandle, LocalSet};
+use crate::task::{AbortHandle, Id, JoinError, JoinHandle, LocalSet};
 use crate::util::IdleNotifiedSet;
 
 /// A collection of tasks spawned on a Tokio runtime.
@@ -37,8 +43,9 @@
 ///     }
 ///
 ///     let mut seen = [false; 10];
-///     while let Some(res) = set.join_one().await.unwrap() {
-///         seen[res] = true;
+///     while let Some(res) = set.join_one().await {
+///         let idx = res.unwrap();
+///         seen[idx] = true;
 ///     }
 ///
 ///     for i in 0..10 {
@@ -48,10 +55,23 @@
 /// ```
 ///
 /// [unstable]: crate#unstable-features
+#[cfg_attr(docsrs, doc(cfg(all(feature = "rt", tokio_unstable))))]
 pub struct JoinSet<T> {
     inner: IdleNotifiedSet<JoinHandle<T>>,
 }
 
+/// A variant of [`task::Builder`] that spawns tasks on a [`JoinSet`] rather
+/// than on the current default runtime.
+///
+/// [`task::Builder`]: crate::task::Builder
+#[cfg(all(tokio_unstable, feature = "tracing"))]
+#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))]
+#[must_use = "builders do nothing unless used to spawn a task"]
+pub struct Builder<'a, T> {
+    joinset: &'a mut JoinSet<T>,
+    builder: super::Builder<'a>,
+}
+
 impl<T> JoinSet<T> {
     /// Create a new `JoinSet`.
     pub fn new() -> Self {
@@ -72,61 +92,107 @@
 }
 
 impl<T: 'static> JoinSet<T> {
-    /// Spawn the provided task on the `JoinSet`.
+    /// Returns a [`Builder`] that can be used to configure a task prior to
+    /// spawning it on this `JoinSet`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::task::JoinSet;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let mut set = JoinSet::new();
+    ///
+    ///     // Use the builder to configure a task's name before spawning it.
+    ///     set.build_task()
+    ///         .name("my_task")
+    ///         .spawn(async { /* ... */ });
+    /// }
+    /// ```
+    #[cfg(all(tokio_unstable, feature = "tracing"))]
+    #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))]
+    pub fn build_task(&mut self) -> Builder<'_, T> {
+        Builder {
+            builder: super::Builder::new(),
+            joinset: self,
+        }
+    }
+
+    /// Spawn the provided task on the `JoinSet`, returning an [`AbortHandle`]
+    /// that can be used to remotely cancel the task.
     ///
     /// # Panics
     ///
     /// This method panics if called outside of a Tokio runtime.
-    pub fn spawn<F>(&mut self, task: F)
+    ///
+    /// [`AbortHandle`]: crate::task::AbortHandle
+    #[track_caller]
+    pub fn spawn<F>(&mut self, task: F) -> AbortHandle
     where
         F: Future<Output = T>,
         F: Send + 'static,
         T: Send,
     {
-        self.insert(crate::spawn(task));
+        self.insert(crate::spawn(task))
     }
 
-    /// Spawn the provided task on the provided runtime and store it in this `JoinSet`.
-    pub fn spawn_on<F>(&mut self, task: F, handle: &Handle)
+    /// Spawn the provided task on the provided runtime and store it in this
+    /// `JoinSet` returning an [`AbortHandle`] that can be used to remotely
+    /// cancel the task.
+    ///
+    /// [`AbortHandle`]: crate::task::AbortHandle
+    #[track_caller]
+    pub fn spawn_on<F>(&mut self, task: F, handle: &Handle) -> AbortHandle
     where
         F: Future<Output = T>,
         F: Send + 'static,
         T: Send,
     {
-        self.insert(handle.spawn(task));
+        self.insert(handle.spawn(task))
     }
 
-    /// Spawn the provided task on the current [`LocalSet`] and store it in this `JoinSet`.
+    /// Spawn the provided task on the current [`LocalSet`] and store it in this
+    /// `JoinSet`, returning an [`AbortHandle`] that can be used to remotely
+    /// cancel the task.
     ///
     /// # Panics
     ///
     /// This method panics if it is called outside of a `LocalSet`.
     ///
     /// [`LocalSet`]: crate::task::LocalSet
-    pub fn spawn_local<F>(&mut self, task: F)
+    /// [`AbortHandle`]: crate::task::AbortHandle
+    #[track_caller]
+    pub fn spawn_local<F>(&mut self, task: F) -> AbortHandle
     where
         F: Future<Output = T>,
         F: 'static,
     {
-        self.insert(crate::task::spawn_local(task));
+        self.insert(crate::task::spawn_local(task))
     }
 
-    /// Spawn the provided task on the provided [`LocalSet`] and store it in this `JoinSet`.
+    /// Spawn the provided task on the provided [`LocalSet`] and store it in
+    /// this `JoinSet`, returning an [`AbortHandle`] that can be used to
+    /// remotely cancel the task.
     ///
     /// [`LocalSet`]: crate::task::LocalSet
-    pub fn spawn_local_on<F>(&mut self, task: F, local_set: &LocalSet)
+    /// [`AbortHandle`]: crate::task::AbortHandle
+    #[track_caller]
+    pub fn spawn_local_on<F>(&mut self, task: F, local_set: &LocalSet) -> AbortHandle
     where
         F: Future<Output = T>,
         F: 'static,
     {
-        self.insert(local_set.spawn_local(task));
+        self.insert(local_set.spawn_local(task))
     }
 
-    fn insert(&mut self, jh: JoinHandle<T>) {
+    fn insert(&mut self, jh: JoinHandle<T>) -> AbortHandle {
+        let abort = jh.abort_handle();
         let mut entry = self.inner.insert_idle(jh);
 
         // Set the waker that is notified when the task completes.
         entry.with_value_and_context(|jh, ctx| jh.set_join_waker(ctx.waker()));
+        abort
     }
 
     /// Waits until one of the tasks in the set completes and returns its output.
@@ -138,14 +204,36 @@
     /// This method is cancel safe. If `join_one` is used as the event in a `tokio::select!`
     /// statement and some other branch completes first, it is guaranteed that no tasks were
     /// removed from this `JoinSet`.
-    pub async fn join_one(&mut self) -> Result<Option<T>, JoinError> {
+    pub async fn join_one(&mut self) -> Option<Result<T, JoinError>> {
+        crate::future::poll_fn(|cx| self.poll_join_one(cx))
+            .await
+            .map(|opt| opt.map(|(_, res)| res))
+    }
+
+    /// Waits until one of the tasks in the set completes and returns its
+    /// output, along with the [task ID] of the completed task.
+    ///
+    /// Returns `None` if the set is empty.
+    ///
+    /// When this method returns an error, then the id of the task that failed can be accessed
+    /// using the [`JoinError::id`] method.
+    ///
+    /// # Cancel Safety
+    ///
+    /// This method is cancel safe. If `join_one_with_id` is used as the event in a `tokio::select!`
+    /// statement and some other branch completes first, it is guaranteed that no tasks were
+    /// removed from this `JoinSet`.
+    ///
+    /// [task ID]: crate::task::Id
+    /// [`JoinError::id`]: fn@crate::task::JoinError::id
+    pub async fn join_one_with_id(&mut self) -> Option<Result<(Id, T), JoinError>> {
         crate::future::poll_fn(|cx| self.poll_join_one(cx)).await
     }
 
     /// Aborts all tasks and waits for them to finish shutting down.
     ///
     /// Calling this method is equivalent to calling [`abort_all`] and then calling [`join_one`] in
-    /// a loop until it returns `Ok(None)`.
+    /// a loop until it returns `None`.
     ///
     /// This method ignores any panics in the tasks shutting down. When this call returns, the
     /// `JoinSet` will be empty.
@@ -154,7 +242,7 @@
     /// [`join_one`]: fn@Self::join_one
     pub async fn shutdown(&mut self) {
         self.abort_all();
-        while self.join_one().await.transpose().is_some() {}
+        while self.join_one().await.is_some() {}
     }
 
     /// Aborts all tasks on this `JoinSet`.
@@ -175,8 +263,7 @@
 
     /// Polls for one of the tasks in the set to complete.
     ///
-    /// If this returns `Poll::Ready(Ok(Some(_)))` or `Poll::Ready(Err(_))`, then the task that
-    /// completed is removed from the set.
+    /// If this returns `Poll::Ready(Some(_))`, then the task that completed is removed from the set.
     ///
     /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` is scheduled
     /// to receive a wakeup when a task in the `JoinSet` completes. Note that on multiple calls to
@@ -189,24 +276,26 @@
     ///
     ///  * `Poll::Pending` if the `JoinSet` is not empty but there is no task whose output is
     ///     available right now.
-    ///  * `Poll::Ready(Ok(Some(value)))` if one of the tasks in this `JoinSet` has completed. The
-    ///    `value` is the return value of one of the tasks that completed.
-    ///  * `Poll::Ready(Err(err))` if one of the tasks in this `JoinSet` has panicked or been
-    ///     aborted.
-    ///  * `Poll::Ready(Ok(None))` if the `JoinSet` is empty.
+    ///  * `Poll::Ready(Some(Ok((id, value))))` if one of the tasks in this `JoinSet` has completed.
+    ///     The `value` is the return value of one of the tasks that completed, and
+    ///     `id` is the [task ID] of that task.
+    ///  * `Poll::Ready(Some(Err(err)))` if one of the tasks in this `JoinSet` has panicked or been
+    ///     aborted. The `err` is the `JoinError` from the panicked/aborted task.
+    ///  * `Poll::Ready(None)` if the `JoinSet` is empty.
     ///
     /// Note that this method may return `Poll::Pending` even if one of the tasks has completed.
     /// This can happen if the [coop budget] is reached.
     ///
     /// [coop budget]: crate::task#cooperative-scheduling
-    fn poll_join_one(&mut self, cx: &mut Context<'_>) -> Poll<Result<Option<T>, JoinError>> {
+    /// [task ID]: crate::task::Id
+    fn poll_join_one(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<(Id, T), JoinError>>> {
         // The call to `pop_notified` moves the entry to the `idle` list. It is moved back to
         // the `notified` list if the waker is notified in the `poll` call below.
         let mut entry = match self.inner.pop_notified(cx.waker()) {
             Some(entry) => entry,
             None => {
                 if self.is_empty() {
-                    return Poll::Ready(Ok(None));
+                    return Poll::Ready(None);
                 } else {
                     // The waker was set by `pop_notified`.
                     return Poll::Pending;
@@ -217,8 +306,10 @@
         let res = entry.with_value_and_context(|jh, ctx| Pin::new(jh).poll(ctx));
 
         if let Poll::Ready(res) = res {
-            entry.remove();
-            Poll::Ready(Some(res).transpose())
+            let entry = entry.remove();
+            // If the task succeeded, add the task ID to the output. Otherwise, the
+            // `JoinError` will already have the task's ID.
+            Poll::Ready(Some(res.map(|output| (entry.id(), output))))
         } else {
             // A JoinHandle generally won't emit a wakeup without being ready unless
             // the coop limit has been reached. We yield to the executor in this
@@ -246,3 +337,111 @@
         Self::new()
     }
 }
+
+// === impl Builder ===
+
+#[cfg(all(tokio_unstable, feature = "tracing"))]
+#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))]
+impl<'a, T: 'static> Builder<'a, T> {
+    /// Assigns a name to the task which will be spawned.
+    pub fn name(self, name: &'a str) -> Self {
+        let builder = self.builder.name(name);
+        Self { builder, ..self }
+    }
+
+    /// Spawn the provided task with this builder's settings and store it in the
+    /// [`JoinSet`], returning an [`AbortHandle`] that can be used to remotely
+    /// cancel the task.
+    ///
+    /// # Returns
+    ///
+    /// An [`AbortHandle`] that can be used to remotely cancel the task.
+    ///
+    /// # Panics
+    ///
+    /// This method panics if called outside of a Tokio runtime.
+    ///
+    /// [`AbortHandle`]: crate::task::AbortHandle
+    #[track_caller]
+    pub fn spawn<F>(self, future: F) -> AbortHandle
+    where
+        F: Future<Output = T>,
+        F: Send + 'static,
+        T: Send,
+    {
+        self.joinset.insert(self.builder.spawn(future))
+    }
+
+    /// Spawn the provided task on the provided [runtime handle] with this
+    /// builder's settings, and store it in the [`JoinSet`].
+    ///
+    /// # Returns
+    ///
+    /// An [`AbortHandle`] that can be used to remotely cancel the task.
+    ///
+    /// [`AbortHandle`]: crate::task::AbortHandle
+    /// [runtime handle]: crate::runtime::Handle
+    #[track_caller]
+    pub fn spawn_on<F>(mut self, future: F, handle: &Handle) -> AbortHandle
+    where
+        F: Future<Output = T>,
+        F: Send + 'static,
+        T: Send,
+    {
+        self.joinset.insert(self.builder.spawn_on(future, handle))
+    }
+
+    /// Spawn the provided task on the current [`LocalSet`] with this builder's
+    /// settings, and store it in the [`JoinSet`].
+    ///
+    /// # Returns
+    ///
+    /// An [`AbortHandle`] that can be used to remotely cancel the task.
+    ///
+    /// # Panics
+    ///
+    /// This method panics if it is called outside of a `LocalSet`.
+    ///
+    /// [`LocalSet`]: crate::task::LocalSet
+    /// [`AbortHandle`]: crate::task::AbortHandle
+    #[track_caller]
+    pub fn spawn_local<F>(self, future: F) -> AbortHandle
+    where
+        F: Future<Output = T>,
+        F: 'static,
+    {
+        self.joinset.insert(self.builder.spawn_local(future))
+    }
+
+    /// Spawn the provided task on the provided [`LocalSet`] with this builder's
+    /// settings, and store it in the [`JoinSet`].
+    ///
+    /// # Returns
+    ///
+    /// An [`AbortHandle`] that can be used to remotely cancel the task.
+    ///
+    /// [`LocalSet`]: crate::task::LocalSet
+    /// [`AbortHandle`]: crate::task::AbortHandle
+    #[track_caller]
+    pub fn spawn_local_on<F>(self, future: F, local_set: &LocalSet) -> AbortHandle
+    where
+        F: Future<Output = T>,
+        F: 'static,
+    {
+        self.joinset
+            .insert(self.builder.spawn_local_on(future, local_set))
+    }
+}
+
+// Manual `Debug` impl so that `Builder` is `Debug` regardless of whether `T` is
+// `Debug`.
+#[cfg(all(tokio_unstable, feature = "tracing"))]
+#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))]
+impl<'a, T> fmt::Debug for Builder<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("join_set::Builder")
+            .field("joinset", &self.joinset)
+            .field("builder", &self.builder)
+            .finish()
+    }
+}
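
The `join_set.rs` hunks above change `JoinSet::join_one` to return `Option<Result<T, JoinError>>` instead of `Result<Option<T>, JoinError>`, add `join_one_with_id`, and make the spawn methods hand back an `AbortHandle`. A minimal sketch of the new calling convention, assuming tokio 1.19 built with `--cfg tokio_unstable` (which gates `JoinSet` in this release):

```rust
use tokio::task::JoinSet;

#[tokio::main]
async fn main() {
    let mut set = JoinSet::new();

    for i in 0..4u32 {
        // `spawn` now hands back an `AbortHandle` for remote cancellation.
        let abort = set.spawn(async move { i });
        if i == 3 {
            abort.abort();
        }
    }

    // `join_one` now yields `Option<Result<T, JoinError>>`: `None` means
    // the set is empty; `Some(Err(_))` is a panicked or aborted task.
    while let Some(res) = set.join_one().await {
        match res {
            Ok(v) => println!("task completed with {}", v),
            Err(e) if e.is_cancelled() => println!("task was aborted"),
            Err(e) => panic!("task panicked: {}", e),
        }
    }
}
```
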
diff --git a/third_party/rust_crates/vendor/tokio/src/task/local.rs b/third_party/rust_crates/vendor/tokio/src/task/local.rs
index 2dbd970..8c59876 100644
--- a/third_party/rust_crates/vendor/tokio/src/task/local.rs
+++ b/third_party/rust_crates/vendor/tokio/src/task/local.rs
@@ -301,18 +301,11 @@
     where F: Future + 'static,
           F::Output: 'static
     {
-        let future = crate::util::trace::task(future, "local", name);
         CURRENT.with(|maybe_cx| {
             let cx = maybe_cx
                 .expect("`spawn_local` called from outside of a `task::LocalSet`");
 
-            let (handle, notified) = cx.owned.bind(future, cx.shared.clone());
-
-            if let Some(notified) = notified {
-                cx.shared.schedule(notified);
-            }
-
-            handle
+            cx.spawn(future, name)
         })
     }
 }
@@ -385,16 +378,7 @@
         F: Future + 'static,
         F::Output: 'static,
     {
-        let future = crate::util::trace::task(future, "local", None);
-
-        let (handle, notified) = self.context.owned.bind(future, self.context.shared.clone());
-
-        if let Some(notified) = notified {
-            self.context.shared.schedule(notified);
-        }
-
-        self.context.shared.waker.wake();
-        handle
+        self.spawn_named(future, None)
     }
 
     /// Runs a future to completion on the provided runtime, driving any local
@@ -507,6 +491,27 @@
         run_until.await
     }
 
+    pub(in crate::task) fn spawn_named<F>(
+        &self,
+        future: F,
+        name: Option<&str>,
+    ) -> JoinHandle<F::Output>
+    where
+        F: Future + 'static,
+        F::Output: 'static,
+    {
+        let handle = self.context.spawn(future, name);
+
+        // Because a task was spawned from *outside* the `LocalSet`, wake the
+        // `LocalSet` future to execute the new task, if it hasn't been woken.
+        //
+        // Spawning via the free fn `spawn` does not require this, as it can
+        // only be called from *within* a future executing on the `LocalSet` —
+        // in that case, the `LocalSet` must already be awake.
+        self.context.shared.waker.wake();
+        handle
+    }
+
     /// Ticks the scheduler, returning whether the local future needs to be
     /// notified again.
     fn tick(&self) -> bool {
@@ -623,6 +628,28 @@
     }
 }
 
+// === impl Context ===
+
+impl Context {
+    #[track_caller]
+    fn spawn<F>(&self, future: F, name: Option<&str>) -> JoinHandle<F::Output>
+    where
+        F: Future + 'static,
+        F::Output: 'static,
+    {
+        let id = crate::runtime::task::Id::next();
+        let future = crate::util::trace::task(future, "local", name, id.as_u64());
+
+        let (handle, notified) = self.owned.bind(future, self.shared.clone(), id);
+
+        if let Some(notified) = notified {
+            self.shared.schedule(notified);
+        }
+
+        handle
+    }
+}
+
 // === impl LocalFuture ===
 
 impl<T: Future> Future for RunUntil<'_, T> {
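
The `local.rs` refactor above funnels both spawn paths through a shared `Context::spawn`, with `spawn_named` additionally waking the `LocalSet` because the task arrives from outside it. A minimal sketch of that outside-spawn path, assuming a current-thread runtime:

```rust
use tokio::task::LocalSet;

fn main() {
    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();

    let local = LocalSet::new();
    // Spawned from *outside* the set, so the `LocalSet` future must be
    // woken to pick the task up; `LocalSet::spawn_local` handles that.
    let handle = local.spawn_local(async { 21 * 2 });

    let answer = rt.block_on(local.run_until(async move { handle.await.unwrap() }));
    assert_eq!(answer, 42);
}
```
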
diff --git a/third_party/rust_crates/vendor/tokio/src/task/mod.rs b/third_party/rust_crates/vendor/tokio/src/task/mod.rs
index d532155..8ea73fb 100644
--- a/third_party/rust_crates/vendor/tokio/src/task/mod.rs
+++ b/third_party/rust_crates/vendor/tokio/src/task/mod.rs
@@ -291,6 +291,11 @@
     mod yield_now;
     pub use yield_now::yield_now;
 
+    cfg_unstable! {
+        mod consume_budget;
+        pub use consume_budget::consume_budget;
+    }
+
     mod local;
     pub use local::{spawn_local, LocalSet};
 
@@ -301,8 +306,10 @@
     pub use unconstrained::{unconstrained, Unconstrained};
 
     cfg_unstable! {
-        mod join_set;
+        pub mod join_set;
+        #[doc(inline)]
         pub use join_set::JoinSet;
+        pub use crate::runtime::task::{Id, AbortHandle};
     }
 
     cfg_trace! {
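
The `mod.rs` hunk exports the new unstable `task::consume_budget` along with `Id` and `AbortHandle`. A hedged sketch of what `consume_budget` is for, assuming `--cfg tokio_unstable`: it spends cooperative-scheduling budget and yields only once the budget is exhausted, so CPU-bound loops stay cooperative without paying for a full `yield_now` on every iteration.

```rust
// Sketch only; must be awaited on a Tokio runtime built with tokio_unstable.
async fn sum_cooperatively(items: &[u64]) -> u64 {
    let mut total = 0;
    for chunk in items.chunks(256) {
        total += chunk.iter().sum::<u64>();
        // Charges coop budget; yields to the scheduler when it runs out.
        tokio::task::consume_budget().await;
    }
    total
}
```
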
diff --git a/third_party/rust_crates/vendor/tokio/src/task/spawn.rs b/third_party/rust_crates/vendor/tokio/src/task/spawn.rs
index a9d7366..5a60f9d 100644
--- a/third_party/rust_crates/vendor/tokio/src/task/spawn.rs
+++ b/third_party/rust_crates/vendor/tokio/src/task/spawn.rs
@@ -142,8 +142,10 @@
         T: Future + Send + 'static,
         T::Output: Send + 'static,
     {
-        let spawn_handle = crate::runtime::context::spawn_handle().expect(CONTEXT_MISSING_ERROR);
-        let task = crate::util::trace::task(future, "task", name);
-        spawn_handle.spawn(task)
+        use crate::runtime::{task, context};
+        let id = task::Id::next();
+        let spawn_handle = context::spawn_handle().expect(CONTEXT_MISSING_ERROR);
+        let task = crate::util::trace::task(future, "task", name, id.as_u64());
+        spawn_handle.spawn(task, id)
     }
 }
diff --git a/third_party/rust_crates/vendor/tokio/src/time/driver/sleep.rs b/third_party/rust_crates/vendor/tokio/src/time/driver/sleep.rs
index 7f27ef2..a629cb8 100644
--- a/third_party/rust_crates/vendor/tokio/src/time/driver/sleep.rs
+++ b/third_party/rust_crates/vendor/tokio/src/time/driver/sleep.rs
@@ -72,7 +72,9 @@
 ///
 /// No work is performed while awaiting on the sleep future to complete. `Sleep`
 /// operates at millisecond granularity and should not be used for tasks that
-/// require high-resolution timers.
+/// require high-resolution timers. The implementation is platform specific,
+/// and some platforms (specifically Windows) provide timers with a coarser
+/// resolution than 1 ms.
 ///
 /// To run something regularly on a schedule, see [`interval`].
 ///
@@ -261,7 +263,7 @@
         let inner = {
             let time_source = handle.time_source().clone();
             let deadline_tick = time_source.deadline_to_tick(deadline);
-            let duration = deadline_tick.checked_sub(time_source.now()).unwrap_or(0);
+            let duration = deadline_tick.saturating_sub(time_source.now());
 
             let location = location.expect("should have location if tracing");
             let resource_span = tracing::trace_span!(
@@ -373,7 +375,7 @@
             let duration = {
                 let now = me.inner.time_source.now();
                 let deadline_tick = me.inner.time_source.deadline_to_tick(deadline);
-                deadline_tick.checked_sub(now).unwrap_or(0)
+                deadline_tick.saturating_sub(now)
             };
 
             tracing::trace!(
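
The `sleep.rs` hunks swap `checked_sub(..).unwrap_or(0)` for `saturating_sub`, which expresses the same clamping more directly: a deadline already in the past becomes a zero-length duration in the tracing instrumentation rather than an underflow. The user-visible behavior this preserves, as a small sketch:

```rust
use std::time::Duration;
use tokio::time::{sleep_until, Instant};

#[tokio::main]
async fn main() {
    // A deadline that is not in the future resolves (approximately) immediately.
    let past = Instant::now()
        .checked_sub(Duration::from_secs(1))
        .unwrap_or_else(Instant::now);
    let start = Instant::now();
    sleep_until(past).await;
    assert!(start.elapsed() < Duration::from_millis(100));
}
```
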
diff --git a/third_party/rust_crates/vendor/tokio/src/time/interval.rs b/third_party/rust_crates/vendor/tokio/src/time/interval.rs
index 8ecb15b..0fe420f 100644
--- a/third_party/rust_crates/vendor/tokio/src/time/interval.rs
+++ b/third_party/rust_crates/vendor/tokio/src/time/interval.rs
@@ -207,6 +207,9 @@
     /// # async fn main() {
     /// let mut interval = interval(Duration::from_millis(50));
     ///
+    /// // First tick resolves immediately after creation
+    /// interval.tick().await;
+    ///
     /// task_that_takes_200_millis().await;
     /// // The `Interval` has missed a tick
     ///
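
The `interval.rs` doc fix above makes the example account for the first tick, which completes immediately when the `Interval` is created. In sketch form:

```rust
use tokio::time::{interval, Duration};

#[tokio::main]
async fn main() {
    let mut interval = interval(Duration::from_millis(50));
    interval.tick().await; // first tick resolves right away
    interval.tick().await; // roughly 50ms after creation
}
```
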
diff --git a/third_party/rust_crates/vendor/tokio/src/util/slab.rs b/third_party/rust_crates/vendor/tokio/src/util/slab.rs
index 214fa08..0e16e40 100644
--- a/third_party/rust_crates/vendor/tokio/src/util/slab.rs
+++ b/third_party/rust_crates/vendor/tokio/src/util/slab.rs
@@ -551,10 +551,9 @@
     fn index_for(&self, slot: *const Value<T>) -> usize {
         use std::mem;
 
-        let base = &self.slots[0] as *const _ as usize;
+        assert_ne!(self.slots.capacity(), 0, "page is unallocated");
 
-        assert!(base != 0, "page is unallocated");
-
+        let base = self.slots.as_ptr() as usize;
         let slot = slot as usize;
         let width = mem::size_of::<Slot<T>>();
 
diff --git a/third_party/rust_crates/vendor/tokio/src/util/trace.rs b/third_party/rust_crates/vendor/tokio/src/util/trace.rs
index 6080e23..76e8a6c 100644
--- a/third_party/rust_crates/vendor/tokio/src/util/trace.rs
+++ b/third_party/rust_crates/vendor/tokio/src/util/trace.rs
@@ -10,7 +10,7 @@
 
         #[inline]
         #[track_caller]
-        pub(crate) fn task<F>(task: F, kind: &'static str, name: Option<&str>) -> Instrumented<F> {
+        pub(crate) fn task<F>(task: F, kind: &'static str, name: Option<&str>, id: u64) -> Instrumented<F> {
             use tracing::instrument::Instrument;
             let location = std::panic::Location::caller();
             let span = tracing::trace_span!(
@@ -18,6 +18,7 @@
                 "runtime.spawn",
                 %kind,
                 task.name = %name.unwrap_or_default(),
+                task.id = id,
                 loc.file = location.file(),
                 loc.line = location.line(),
                 loc.col = location.column(),
@@ -91,7 +92,7 @@
 cfg_not_trace! {
     cfg_rt! {
         #[inline]
-        pub(crate) fn task<F>(task: F, _: &'static str, _name: Option<&str>) -> F {
+        pub(crate) fn task<F>(task: F, _: &'static str, _name: Option<&str>, _: u64) -> F {
             // nop
             task
         }
diff --git a/third_party/rust_crates/vendor/tokio/tests/io_read.rs b/third_party/rust_crates/vendor/tokio/tests/io_read.rs
index cb1aa70..11da5a1 100644
--- a/third_party/rust_crates/vendor/tokio/tests/io_read.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/io_read.rs
@@ -8,6 +8,11 @@
 use std::pin::Pin;
 use std::task::{Context, Poll};
 
+mod support {
+    pub(crate) mod leaked_buffers;
+}
+use support::leaked_buffers::LeakedBuffers;
+
 #[tokio::test]
 async fn read() {
     #[derive(Default)]
@@ -37,16 +42,27 @@
     assert_eq!(buf[..], b"hello world"[..]);
 }
 
-struct BadAsyncRead;
+struct BadAsyncRead {
+    leaked_buffers: LeakedBuffers,
+}
+
+impl BadAsyncRead {
+    fn new() -> Self {
+        Self {
+            leaked_buffers: LeakedBuffers::new(),
+        }
+    }
+}
 
 impl AsyncRead for BadAsyncRead {
     fn poll_read(
-        self: Pin<&mut Self>,
+        mut self: Pin<&mut Self>,
         _cx: &mut Context<'_>,
         buf: &mut ReadBuf<'_>,
     ) -> Poll<io::Result<()>> {
-        *buf = ReadBuf::new(Box::leak(vec![0; buf.capacity()].into_boxed_slice()));
+        *buf = ReadBuf::new(unsafe { self.leaked_buffers.create(buf.capacity()) });
         buf.advance(buf.capacity());
+
         Poll::Ready(Ok(()))
     }
 }
@@ -55,5 +71,5 @@
 #[should_panic]
 async fn read_buf_bad_async_read() {
     let mut buf = Vec::with_capacity(10);
-    BadAsyncRead.read_buf(&mut buf).await.unwrap();
+    BadAsyncRead::new().read_buf(&mut buf).await.unwrap();
 }
diff --git a/third_party/rust_crates/vendor/tokio/tests/io_take.rs b/third_party/rust_crates/vendor/tokio/tests/io_take.rs
index 684e041..d623de1d 100644
--- a/third_party/rust_crates/vendor/tokio/tests/io_take.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/io_take.rs
@@ -6,6 +6,11 @@
 use tokio::io::{self, AsyncRead, AsyncReadExt, ReadBuf};
 use tokio_test::assert_ok;
 
+mod support {
+    pub(crate) mod leaked_buffers;
+}
+use support::leaked_buffers::LeakedBuffers;
+
 #[tokio::test]
 async fn take() {
     let mut buf = [0; 6];
@@ -34,17 +39,25 @@
     assert_eq!(&buf, &b"ABhell\0\0"[..]);
 }
 
-struct BadReader;
+struct BadReader {
+    leaked_buffers: LeakedBuffers,
+}
+
+impl BadReader {
+    fn new() -> Self {
+        Self {
+            leaked_buffers: LeakedBuffers::new(),
+        }
+    }
+}
 
 impl AsyncRead for BadReader {
     fn poll_read(
-        self: Pin<&mut Self>,
+        mut self: Pin<&mut Self>,
         _cx: &mut Context<'_>,
         read_buf: &mut ReadBuf<'_>,
     ) -> Poll<io::Result<()>> {
-        let vec = vec![0; 10];
-
-        let mut buf = ReadBuf::new(vec.leak());
+        let mut buf = ReadBuf::new(unsafe { self.leaked_buffers.create(10) });
         buf.put_slice(&[123; 10]);
         *read_buf = buf;
 
@@ -57,5 +70,5 @@
 async fn bad_reader_fails() {
     let mut buf = Vec::with_capacity(10);
 
-    BadReader.take(10).read_buf(&mut buf).await.unwrap();
+    BadReader::new().take(10).read_buf(&mut buf).await.unwrap();
 }
diff --git a/third_party/rust_crates/vendor/tokio/tests/macros_join.rs b/third_party/rust_crates/vendor/tokio/tests/macros_join.rs
index d4f20b3..56bd9c5 100644
--- a/third_party/rust_crates/vendor/tokio/tests/macros_join.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/macros_join.rs
@@ -1,5 +1,6 @@
 #![cfg(feature = "macros")]
 #![allow(clippy::blacklisted_name)]
+use std::sync::Arc;
 
 #[cfg(target_arch = "wasm32")]
 use wasm_bindgen_test::wasm_bindgen_test as test;
@@ -9,7 +10,7 @@
 #[cfg(not(target_arch = "wasm32"))]
 use tokio::test as maybe_tokio_test;
 
-use tokio::sync::oneshot;
+use tokio::sync::{oneshot, Semaphore};
 use tokio_test::{assert_pending, assert_ready, task};
 
 #[maybe_tokio_test]
@@ -71,12 +72,82 @@
         let ready = future::ready(0i32);
         tokio::join!(ready)
     };
-    assert_eq!(mem::size_of_val(&fut), 16);
+    assert_eq!(mem::size_of_val(&fut), 20);
 
     let fut = async {
         let ready1 = future::ready(0i32);
         let ready2 = future::ready(0i32);
         tokio::join!(ready1, ready2)
     };
-    assert_eq!(mem::size_of_val(&fut), 28);
+    assert_eq!(mem::size_of_val(&fut), 32);
+}
+
+async fn non_cooperative_task(permits: Arc<Semaphore>) -> usize {
+    let mut exceeded_budget = 0;
+
+    for _ in 0..5 {
+        // Another task should run after this task uses its whole budget
+        for _ in 0..128 {
+            let _permit = permits.clone().acquire_owned().await.unwrap();
+        }
+
+        exceeded_budget += 1;
+    }
+
+    exceeded_budget
+}
+
+async fn poor_little_task(permits: Arc<Semaphore>) -> usize {
+    let mut how_many_times_i_got_to_run = 0;
+
+    for _ in 0..5 {
+        let _permit = permits.clone().acquire_owned().await.unwrap();
+        how_many_times_i_got_to_run += 1;
+    }
+
+    how_many_times_i_got_to_run
+}
+
+#[tokio::test]
+async fn join_does_not_allow_tasks_to_starve() {
+    let permits = Arc::new(Semaphore::new(1));
+
+    // non_cooperative_task should yield after its budget is exceeded and then poor_little_task should run.
+    let (non_cooperative_result, little_task_result) = tokio::join!(
+        non_cooperative_task(Arc::clone(&permits)),
+        poor_little_task(permits)
+    );
+
+    assert_eq!(5, non_cooperative_result);
+    assert_eq!(5, little_task_result);
+}
+
+#[tokio::test]
+async fn a_different_future_is_polled_first_every_time_poll_fn_is_polled() {
+    let poll_order = Arc::new(std::sync::Mutex::new(vec![]));
+
+    let fut = |x, poll_order: Arc<std::sync::Mutex<Vec<i32>>>| async move {
+        for _ in 0..4 {
+            {
+                let mut guard = poll_order.lock().unwrap();
+
+                guard.push(x);
+            }
+
+            tokio::task::yield_now().await;
+        }
+    };
+
+    tokio::join!(
+        fut(1, Arc::clone(&poll_order)),
+        fut(2, Arc::clone(&poll_order)),
+        fut(3, Arc::clone(&poll_order)),
+    );
+
+    // Each time the future created by join! is polled, it should start
+    // by polling a different future first.
+    assert_eq!(
+        vec![1, 2, 3, 2, 3, 1, 3, 1, 2, 1, 2, 3],
+        *poll_order.lock().unwrap()
+    );
 }
diff --git a/third_party/rust_crates/vendor/tokio/tests/macros_rename_test.rs b/third_party/rust_crates/vendor/tokio/tests/macros_rename_test.rs
new file mode 100644
index 0000000..fd5554c
--- /dev/null
+++ b/third_party/rust_crates/vendor/tokio/tests/macros_rename_test.rs
@@ -0,0 +1,26 @@
+#![cfg(feature = "full")]
+
+#[allow(unused_imports)]
+use std as tokio;
+
+use ::tokio as tokio1;
+
+async fn compute() -> usize {
+    let join = tokio1::spawn(async { 1 });
+    join.await.unwrap()
+}
+
+#[tokio1::main(crate = "tokio1")]
+async fn compute_main() -> usize {
+    compute().await
+}
+
+#[test]
+fn crate_rename_main() {
+    assert_eq!(1, compute_main());
+}
+
+#[tokio1::test(crate = "tokio1")]
+async fn crate_rename_test() {
+    assert_eq!(1, compute().await);
+}
diff --git a/third_party/rust_crates/vendor/tokio/tests/macros_select.rs b/third_party/rust_crates/vendor/tokio/tests/macros_select.rs
index 755365a..c60a4a9 100644
--- a/third_party/rust_crates/vendor/tokio/tests/macros_select.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/macros_select.rs
@@ -461,6 +461,7 @@
         x = async { 1 } => x,
         x = async { 1 } => x,
         x = async { 1 } => x,
+        x = async { 1 } => x,
     };
 
     assert_eq!(1, num);
diff --git a/third_party/rust_crates/vendor/tokio/tests/macros_try_join.rs b/third_party/rust_crates/vendor/tokio/tests/macros_try_join.rs
index 60a726b..556436d 100644
--- a/third_party/rust_crates/vendor/tokio/tests/macros_try_join.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/macros_try_join.rs
@@ -1,7 +1,9 @@
 #![cfg(feature = "macros")]
 #![allow(clippy::blacklisted_name)]
 
-use tokio::sync::oneshot;
+use std::sync::Arc;
+
+use tokio::sync::{oneshot, Semaphore};
 use tokio_test::{assert_pending, assert_ready, task};
 
 #[cfg(target_arch = "wasm32")]
@@ -94,16 +96,89 @@
         let ready = future::ready(ok(0i32));
         tokio::try_join!(ready)
     };
-    assert_eq!(mem::size_of_val(&fut), 16);
+    assert_eq!(mem::size_of_val(&fut), 20);
 
     let fut = async {
         let ready1 = future::ready(ok(0i32));
         let ready2 = future::ready(ok(0i32));
         tokio::try_join!(ready1, ready2)
     };
-    assert_eq!(mem::size_of_val(&fut), 28);
+    assert_eq!(mem::size_of_val(&fut), 32);
 }
 
 fn ok<T>(val: T) -> Result<T, ()> {
     Ok(val)
 }
+
+async fn non_cooperative_task(permits: Arc<Semaphore>) -> Result<usize, String> {
+    let mut exceeded_budget = 0;
+
+    for _ in 0..5 {
+        // Another task should run after this task uses its whole budget
+        for _ in 0..128 {
+            let _permit = permits.clone().acquire_owned().await.unwrap();
+        }
+
+        exceeded_budget += 1;
+    }
+
+    Ok(exceeded_budget)
+}
+
+async fn poor_little_task(permits: Arc<Semaphore>) -> Result<usize, String> {
+    let mut how_many_times_i_got_to_run = 0;
+
+    for _ in 0..5 {
+        let _permit = permits.clone().acquire_owned().await.unwrap();
+
+        how_many_times_i_got_to_run += 1;
+    }
+
+    Ok(how_many_times_i_got_to_run)
+}
+
+#[tokio::test]
+async fn try_join_does_not_allow_tasks_to_starve() {
+    let permits = Arc::new(Semaphore::new(10));
+
+    // non_cooperative_task should yield after its budget is exceeded and then poor_little_task should run.
+    let result = tokio::try_join!(
+        non_cooperative_task(Arc::clone(&permits)),
+        poor_little_task(permits)
+    );
+
+    let (non_cooperative_result, little_task_result) = result.unwrap();
+
+    assert_eq!(5, non_cooperative_result);
+    assert_eq!(5, little_task_result);
+}
+
+#[tokio::test]
+async fn a_different_future_is_polled_first_every_time_poll_fn_is_polled() {
+    let poll_order = Arc::new(std::sync::Mutex::new(vec![]));
+
+    let fut = |x, poll_order: Arc<std::sync::Mutex<Vec<i32>>>| async move {
+        for _ in 0..4 {
+            {
+                let mut guard = poll_order.lock().unwrap();
+
+                guard.push(x);
+            }
+
+            tokio::task::yield_now().await;
+        }
+    };
+
+    tokio::join!(
+        fut(1, Arc::clone(&poll_order)),
+        fut(2, Arc::clone(&poll_order)),
+        fut(3, Arc::clone(&poll_order)),
+    );
+
+    // Each time the future created by join! is polled, it should start
+    // by polling a different future first.
+    assert_eq!(
+        vec![1, 2, 3, 2, 3, 1, 3, 1, 2, 1, 2, 3],
+        *poll_order.lock().unwrap()
+    );
+}
diff --git a/third_party/rust_crates/vendor/tokio/tests/rt_common.rs b/third_party/rust_crates/vendor/tokio/tests/rt_common.rs
index cb1d0f6..14e1909 100644
--- a/third_party/rust_crates/vendor/tokio/tests/rt_common.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/rt_common.rs
@@ -748,7 +748,11 @@
 
     #[test]
     fn wake_while_rt_is_dropping() {
-        use tokio::task;
+        use tokio::sync::Barrier;
+        use core::sync::atomic::{AtomicBool, Ordering};
+
+        let drop_triggered = Arc::new(AtomicBool::new(false));
+        let set_drop_triggered = drop_triggered.clone();
 
         struct OnDrop<F: FnMut()>(F);
 
@@ -762,17 +766,21 @@
         let (tx2, rx2) = oneshot::channel();
         let (tx3, rx3) = oneshot::channel();
 
-        let rt = rt();
+        let barrier = Arc::new(Barrier::new(4));
+        let barrier1 = barrier.clone();
+        let barrier2 = barrier.clone();
+        let barrier3 = barrier.clone();
 
-        let h1 = rt.clone();
+        let rt = rt();
 
         rt.spawn(async move {
             // Ensure a waker gets stored in oneshot 1.
-            let _ = rx1.await;
+            let _ = tokio::join!(rx1, barrier1.wait());
             tx3.send(()).unwrap();
         });
 
         rt.spawn(async move {
+            let h1 = tokio::runtime::Handle::current();
             // When this task is dropped, we'll be "closing remotes".
             // We spawn a new task that owns the `tx1`, to move its Drop
             // out of here.
@@ -785,24 +793,27 @@
                 h1.spawn(async move {
                     tx1.send(()).unwrap();
                 });
+                // Just a sanity check that this entire thing actually happened
+                set_drop_triggered.store(true, Ordering::Relaxed);
             });
-            let _ = rx2.await;
+            let _ = tokio::join!(rx2, barrier2.wait());
         });
 
         rt.spawn(async move {
-            let _ = rx3.await;
+            let _ = tokio::join!(rx3, barrier3.wait());
             // We'll never get here, but once task 3 drops, this will
             // force task 2 to re-schedule since it's waiting on oneshot 2.
             tx2.send(()).unwrap();
         });
 
-        // Tick the loop
-        rt.block_on(async {
-            task::yield_now().await;
-        });
+        // Wait until every oneshot channel has been polled.
+        rt.block_on(barrier.wait());
 
         // Drop the rt
         drop(rt);
+
+        // Make sure that the spawn actually happened
+        assert!(drop_triggered.load(Ordering::Relaxed));
     }
 
     #[test]
@@ -1043,6 +1054,31 @@
         });
     }
 
+    #[cfg(tokio_unstable)]
+    #[test]
+    fn coop_consume_budget() {
+        let rt = rt();
+
+        rt.block_on(async {
+            poll_fn(|cx| {
+                let counter = Arc::new(std::sync::Mutex::new(0));
+                let counter_clone = Arc::clone(&counter);
+                let mut worker = Box::pin(async move {
+                    // Consume the budget until a yield happens
+                    for _ in 0..1000 {
+                        *counter.lock().unwrap() += 1;
+                        task::consume_budget().await
+                    }
+                });
+                // Assert that the worker yielded and did not manage to
+                // finish the whole workload (assuming a total budget of 128)
+                assert!(Pin::new(&mut worker).poll(cx).is_pending());
+                assert!(*counter_clone.lock().unwrap() < 1000);
+                std::task::Poll::Ready(())
+            }).await;
+        });
+    }
+
     // Tests that the "next task" scheduler optimization is not able to starve
     // other tasks.
     #[test]
diff --git a/third_party/rust_crates/vendor/tokio/tests/rt_metrics.rs b/third_party/rust_crates/vendor/tokio/tests/rt_metrics.rs
index 0a26b80..1521cd2 100644
--- a/third_party/rust_crates/vendor/tokio/tests/rt_metrics.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/rt_metrics.rs
@@ -369,6 +369,40 @@
     });
 }
 
+#[cfg(any(target_os = "linux", target_os = "macos"))]
+#[test]
+fn io_driver_fd_count() {
+    let rt = basic();
+    let metrics = rt.metrics();
+
+    // Since the io driver is enabled alongside the process driver,
+    // we always have 1 fd registered.
+    assert_eq!(metrics.io_driver_fd_registered_count(), 1);
+
+    let stream = tokio::net::TcpStream::connect("google.com:80");
+    let stream = rt.block_on(async move { stream.await.unwrap() });
+
+    assert_eq!(metrics.io_driver_fd_registered_count(), 2);
+    assert_eq!(metrics.io_driver_fd_deregistered_count(), 0);
+
+    drop(stream);
+
+    assert_eq!(metrics.io_driver_fd_deregistered_count(), 1);
+    assert_eq!(metrics.io_driver_fd_registered_count(), 2);
+}
+
+#[cfg(any(target_os = "linux", target_os = "macos"))]
+#[test]
+fn io_driver_ready_count() {
+    let rt = basic();
+    let metrics = rt.metrics();
+
+    let stream = tokio::net::TcpStream::connect("google.com:80");
+    let _stream = rt.block_on(async move { stream.await.unwrap() });
+
+    assert_eq!(metrics.io_driver_ready_count(), 2);
+}
+
 fn basic() -> Runtime {
     tokio::runtime::Builder::new_current_thread()
         .enable_all()
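
The new `rt_metrics.rs` tests cover the io driver counters added to `RuntimeMetrics` in this release. A hedged sketch of reading them, assuming `--cfg tokio_unstable` and a runtime with the io driver enabled:

```rust
fn main() {
    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();

    let metrics = rt.metrics();
    println!(
        "fds registered: {}, deregistered: {}, readiness events: {}",
        metrics.io_driver_fd_registered_count(),
        metrics.io_driver_fd_deregistered_count(),
        metrics.io_driver_ready_count(),
    );
}
```
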
diff --git a/third_party/rust_crates/vendor/tokio/tests/support/leaked_buffers.rs b/third_party/rust_crates/vendor/tokio/tests/support/leaked_buffers.rs
new file mode 100644
index 0000000..3ee8a18
--- /dev/null
+++ b/third_party/rust_crates/vendor/tokio/tests/support/leaked_buffers.rs
@@ -0,0 +1,26 @@
+/// Can create buffers of arbitrary lifetime.
+/// Frees created buffers when dropped.
+///
+/// This struct is inherently unsafe: the programmer must
+/// ensure that it outlives every slice it creates, since
+/// the slices borrow from buffers it owns.
+///
+/// Used in certain test scenarios as a safer alternative to
+/// Vec::leak, to satisfy the address sanitizer.
+pub struct LeakedBuffers {
+    leaked_vecs: Vec<Box<[u8]>>,
+}
+
+impl LeakedBuffers {
+    pub fn new() -> Self {
+        Self {
+            leaked_vecs: vec![],
+        }
+    }
+    pub unsafe fn create<'a>(&mut self, size: usize) -> &'a mut [u8] {
+        let mut new_mem = vec![0u8; size].into_boxed_slice();
+        let slice = std::slice::from_raw_parts_mut(new_mem.as_mut_ptr(), new_mem.len());
+        self.leaked_vecs.push(new_mem);
+        slice
+    }
+}
diff --git a/third_party/rust_crates/vendor/tokio/tests/sync_broadcast.rs b/third_party/rust_crates/vendor/tokio/tests/sync_broadcast.rs
index 1b68eb7..b38b638 100644
--- a/third_party/rust_crates/vendor/tokio/tests/sync_broadcast.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/sync_broadcast.rs
@@ -457,6 +457,63 @@
     assert_empty!(rx);
 }
 
+#[test]
+fn receiver_len_with_lagged() {
+    let (tx, mut rx) = broadcast::channel(3);
+
+    tx.send(10).unwrap();
+    tx.send(20).unwrap();
+    tx.send(30).unwrap();
+    tx.send(40).unwrap();
+
+    assert_eq!(rx.len(), 4);
+    assert_eq!(assert_recv!(rx), 10);
+
+    tx.send(50).unwrap();
+    tx.send(60).unwrap();
+
+    assert_eq!(rx.len(), 5);
+    assert_lagged!(rx.try_recv(), 1);
+}
+
 fn is_closed(err: broadcast::error::RecvError) -> bool {
     matches!(err, broadcast::error::RecvError::Closed)
 }
+
+#[test]
+fn resubscribe_points_to_tail() {
+    let (tx, mut rx) = broadcast::channel(3);
+    tx.send(1).unwrap();
+
+    let mut rx_resub = rx.resubscribe();
+
+    // verify we're one behind at the start
+    assert_empty!(rx_resub);
+    assert_eq!(assert_recv!(rx), 1);
+
+    // verify we do not affect rx
+    tx.send(2).unwrap();
+    assert_eq!(assert_recv!(rx_resub), 2);
+    tx.send(3).unwrap();
+    assert_eq!(assert_recv!(rx), 2);
+    assert_eq!(assert_recv!(rx), 3);
+    assert_empty!(rx);
+
+    assert_eq!(assert_recv!(rx_resub), 3);
+    assert_empty!(rx_resub);
+}
+
+#[test]
+fn resubscribe_lagged() {
+    let (tx, mut rx) = broadcast::channel(1);
+    tx.send(1).unwrap();
+    tx.send(2).unwrap();
+
+    let mut rx_resub = rx.resubscribe();
+    assert_lagged!(rx.try_recv(), 1);
+    assert_empty!(rx_resub);
+
+    assert_eq!(assert_recv!(rx), 2);
+    assert_empty!(rx);
+    assert_empty!(rx_resub);
+}
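
The `sync_broadcast.rs` tests exercise the new `Receiver::resubscribe`, which creates a receiver positioned at the channel's tail: it does not see values sent before the call and does not inherit the original receiver's lag. A minimal sketch:

```rust
use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = broadcast::channel(16);
    tx.send(1).unwrap();

    let mut rx2 = rx.resubscribe(); // starts at the tail: misses the `1`
    tx.send(2).unwrap();

    assert_eq!(rx.recv().await.unwrap(), 1);
    assert_eq!(rx2.recv().await.unwrap(), 2);
}
```
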
diff --git a/third_party/rust_crates/vendor/tokio/tests/sync_mutex.rs b/third_party/rust_crates/vendor/tokio/tests/sync_mutex.rs
index 51dbe03..bcd9b1e 100644
--- a/third_party/rust_crates/vendor/tokio/tests/sync_mutex.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/sync_mutex.rs
@@ -155,7 +155,7 @@
         let g1 = m.try_lock();
         assert!(g1.is_ok());
         let g2 = m.try_lock();
-        assert!(!g2.is_ok());
+        assert!(g2.is_err());
     }
     let g3 = m.try_lock();
     assert!(g3.is_ok());
diff --git a/third_party/rust_crates/vendor/tokio/tests/sync_mutex_owned.rs b/third_party/rust_crates/vendor/tokio/tests/sync_mutex_owned.rs
index 2ce15de..98ced15 100644
--- a/third_party/rust_crates/vendor/tokio/tests/sync_mutex_owned.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/sync_mutex_owned.rs
@@ -122,7 +122,7 @@
         let g1 = m.clone().try_lock_owned();
         assert!(g1.is_ok());
         let g2 = m.clone().try_lock_owned();
-        assert!(!g2.is_ok());
+        assert!(g2.is_err());
     }
     let g3 = m.try_lock_owned();
     assert!(g3.is_ok());
diff --git a/third_party/rust_crates/vendor/tokio/tests/sync_notify.rs b/third_party/rust_crates/vendor/tokio/tests/sync_notify.rs
index 5318d13..4236a91 100644
--- a/third_party/rust_crates/vendor/tokio/tests/sync_notify.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/sync_notify.rs
@@ -154,3 +154,74 @@
 
     assert_ready!(notified2.poll());
 }
+
+#[test]
+fn test_notify_one_not_enabled() {
+    let notify = Notify::new();
+    let mut future = spawn(notify.notified());
+
+    notify.notify_one();
+    assert_ready!(future.poll());
+}
+
+#[test]
+fn test_notify_one_after_enable() {
+    let notify = Notify::new();
+    let mut future = spawn(notify.notified());
+
+    future.enter(|_, fut| assert!(!fut.enable()));
+
+    notify.notify_one();
+    assert_ready!(future.poll());
+    future.enter(|_, fut| assert!(fut.enable()));
+}
+
+#[test]
+fn test_poll_after_enable() {
+    let notify = Notify::new();
+    let mut future = spawn(notify.notified());
+
+    future.enter(|_, fut| assert!(!fut.enable()));
+    assert_pending!(future.poll());
+}
+
+#[test]
+fn test_enable_after_poll() {
+    let notify = Notify::new();
+    let mut future = spawn(notify.notified());
+
+    assert_pending!(future.poll());
+    future.enter(|_, fut| assert!(!fut.enable()));
+}
+
+#[test]
+fn test_enable_consumes_permit() {
+    let notify = Notify::new();
+
+    // Add a permit.
+    notify.notify_one();
+
+    let mut future1 = spawn(notify.notified());
+    future1.enter(|_, fut| assert!(fut.enable()));
+
+    let mut future2 = spawn(notify.notified());
+    future2.enter(|_, fut| assert!(!fut.enable()));
+}
+
+#[test]
+fn test_waker_update() {
+    use futures::task::noop_waker;
+    use std::future::Future;
+    use std::task::Context;
+
+    let notify = Notify::new();
+    let mut future = spawn(notify.notified());
+
+    let noop = noop_waker();
+    future.enter(|_, fut| assert_pending!(fut.poll(&mut Context::from_waker(&noop))));
+
+    assert_pending!(future.poll());
+    notify.notify_one();
+
+    assert!(future.is_woken());
+}
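
The `sync_notify.rs` tests cover the new `Notified::enable`, which registers the future's interest without polling it, returning `true` if an already-stored permit was consumed. A minimal sketch:

```rust
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let notify = Notify::new();

    let notified = notify.notified();
    tokio::pin!(notified);

    // Register interest up front; `false` because no permit is stored yet.
    assert!(!notified.as_mut().enable());

    notify.notify_one();
    notified.await; // completes: the wakeup was reserved by `enable`
}
```
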
diff --git a/third_party/rust_crates/vendor/tokio/tests/sync_watch.rs b/third_party/rust_crates/vendor/tokio/tests/sync_watch.rs
index 8b9ea81..d47f0df 100644
--- a/third_party/rust_crates/vendor/tokio/tests/sync_watch.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/sync_watch.rs
@@ -211,3 +211,32 @@
     drop(rx);
     assert!(tx.is_closed());
 }
+
+#[test]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+fn send_modify_panic() {
+    let (tx, mut rx) = watch::channel("one");
+
+    tx.send_modify(|old| *old = "two");
+    assert_eq!(*rx.borrow_and_update(), "two");
+
+    let mut rx2 = rx.clone();
+    assert_eq!(*rx2.borrow_and_update(), "two");
+
+    let mut task = spawn(rx2.changed());
+
+    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+        tx.send_modify(|old| {
+            *old = "panicked";
+            panic!();
+        })
+    }));
+    assert!(result.is_err());
+
+    assert_pending!(task.poll());
+    assert_eq!(*rx.borrow(), "panicked");
+
+    tx.send_modify(|old| *old = "three");
+    assert_ready_ok!(task.poll());
+    assert_eq!(*rx.borrow_and_update(), "three");
+}
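
The `sync_watch.rs` test pins down panic behavior for `watch::Sender::send_modify`, which mutates the value in place and then notifies receivers; the test shows the modification sticks even if the closure panics. Typical non-panicking use, as a sketch:

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(vec![1, 2, 3]);

    // Modify in place without replacing (or cloning) the stored value.
    tx.send_modify(|v| v.push(4));

    rx.changed().await.unwrap();
    assert_eq!(*rx.borrow(), vec![1, 2, 3, 4]);
}
```
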
diff --git a/third_party/rust_crates/vendor/tokio/tests/task_join_set.rs b/third_party/rust_crates/vendor/tokio/tests/task_join_set.rs
index 66a2fbb..d016e00 100644
--- a/third_party/rust_crates/vendor/tokio/tests/task_join_set.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/task_join_set.rs
@@ -24,7 +24,7 @@
     set.detach_all();
     assert_eq!(set.len(), 0);
 
-    assert!(matches!(set.join_one().await, Ok(None)));
+    assert!(matches!(set.join_one().await, None));
 
     for i in 0..10 {
         set.spawn(async move {
@@ -35,14 +35,14 @@
     }
 
     let mut seen = [false; 10];
-    while let Some(res) = set.join_one().await.unwrap() {
+    while let Some(res) = set.join_one().await.transpose().unwrap() {
         seen[res] = true;
     }
 
     for was_seen in &seen {
         assert!(was_seen);
     }
-    assert!(matches!(set.join_one().await, Ok(None)));
+    assert!(matches!(set.join_one().await, None));
 
     // Do it again.
     for i in 0..10 {
@@ -53,14 +53,14 @@
     }
 
     let mut seen = [false; 10];
-    while let Some(res) = set.join_one().await.unwrap() {
+    while let Some(res) = set.join_one().await.transpose().unwrap() {
         seen[res] = true;
     }
 
     for was_seen in &seen {
         assert!(was_seen);
     }
-    assert!(matches!(set.join_one().await, Ok(None)));
+    assert!(matches!(set.join_one().await, None));
 }
 
 #[tokio::test]
@@ -106,6 +106,40 @@
     }
 }
 
+#[tokio::test(start_paused = true)]
+async fn abort_tasks() {
+    let mut set = JoinSet::new();
+    let mut num_canceled = 0;
+    let mut num_completed = 0;
+    for i in 0..16 {
+        let abort = set.spawn(async move {
+            tokio::time::sleep(Duration::from_secs(i as u64)).await;
+            i
+        });
+
+        if i % 2 != 0 {
+            // abort odd-numbered tasks.
+            abort.abort();
+        }
+    }
+    loop {
+        match set.join_one().await {
+            Some(Ok(res)) => {
+                num_completed += 1;
+                assert_eq!(res % 2, 0);
+            }
+            Some(Err(e)) => {
+                assert!(e.is_cancelled());
+                num_canceled += 1;
+            }
+            None => break,
+        }
+    }
+
+    assert_eq!(num_canceled, 8);
+    assert_eq!(num_completed, 8);
+}
+
 #[test]
 fn runtime_gone() {
     let mut set = JoinSet::new();
@@ -115,7 +149,11 @@
         drop(rt);
     }
 
-    assert!(rt().block_on(set.join_one()).unwrap_err().is_cancelled());
+    assert!(rt()
+        .block_on(set.join_one())
+        .unwrap()
+        .unwrap_err()
+        .is_cancelled());
 }
 
 // This ensures that `join_one` works correctly when the coop budget is
@@ -145,14 +183,14 @@
     let mut coop_count = 0;
     loop {
         match set.join_one().now_or_never() {
-            Some(Ok(Some(()))) => {}
-            Some(Err(err)) => panic!("failed: {}", err),
+            Some(Some(Ok(()))) => {}
+            Some(Some(Err(err))) => panic!("failed: {}", err),
             None => {
                 coop_count += 1;
                 tokio::task::yield_now().await;
                 continue;
             }
-            Some(Ok(None)) => break,
+            Some(None) => break,
         }
 
         count += 1;
@@ -181,7 +219,7 @@
     assert_eq!(set.len(), 10);
 
     let mut count = 0;
-    while let Some(res) = set.join_one().await.transpose() {
+    while let Some(res) = set.join_one().await {
         if let Err(err) = res {
             assert!(err.is_cancelled());
         }
diff --git a/third_party/rust_crates/vendor/tokio/tests/uds_stream.rs b/third_party/rust_crates/vendor/tokio/tests/uds_stream.rs
index 5f1b4cff..b8c4e6a 100644
--- a/third_party/rust_crates/vendor/tokio/tests/uds_stream.rs
+++ b/third_party/rust_crates/vendor/tokio/tests/uds_stream.rs
@@ -25,13 +25,13 @@
     let connect = UnixStream::connect(&sock_path);
     let ((mut server, _), mut client) = try_join(accept, connect).await?;
 
-    // Write to the client. TODO: Switch to write_all.
-    let write_len = client.write(b"hello").await?;
-    assert_eq!(write_len, 5);
+    // Write to the client.
+    client.write_all(b"hello").await?;
     drop(client);
-    // Read from the server. TODO: Switch to read_to_end.
-    let mut buf = [0u8; 5];
-    server.read_exact(&mut buf).await?;
+
+    // Read from the server.
+    let mut buf = vec![];
+    server.read_to_end(&mut buf).await?;
     assert_eq!(&buf, b"hello");
     let len = server.read(&mut buf).await?;
     assert_eq!(len, 0);
diff --git a/third_party/rust_crates/vendor/tracing-mutex/bors.toml b/third_party/rust_crates/vendor/tracing-mutex/bors.toml
new file mode 100644
index 0000000..6f20720
--- /dev/null
+++ b/third_party/rust_crates/vendor/tracing-mutex/bors.toml
@@ -0,0 +1,5 @@
+status = [
+    'Rust project (stable)',
+    'Rust project (beta)',
+    'Documentation build',
+]