[rust] Bump parking_lot to 0.6.4

This also indirectly:

* adds lock_api 0.1.5
* bumps owning_ref to 0.4.0
* bumps parking_lot_core to 0.3.1
* adds rustc_version 0.2.3
* adds scopeguard 0.3.3
* adds semver 0.9.0
* adds semver-parser 0.7.0
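
The lock_api addition is the root of most of these: parking_lot 0.6
moved its generic Mutex/RwLock wrappers into the lock_api crate, and
parking_lot's own types are now thin aliases over it. A rough sketch of
the 0.6 layering, for reviewers (illustrative, not code in this change;
RawMutex is parking_lot's raw lock built on parking_lot_core):

    pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
    pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;

rustc_version, semver, and semver-parser appear to come in via
parking_lot_core's build script, which uses them to detect the
compiler version.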

OSRB-91 #comment
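
No call-site changes are expected from this bump: parking_lot's lock
API stayed source-compatible for the common cases, and lock() still
returns a guard directly (parking_lot does not use lock poisoning, so
there is no Result to unwrap). A minimal usage sketch, for reference
only:

    use parking_lot::{Mutex, RwLock};

    let m = Mutex::new(0u32);
    *m.lock() += 1; // MutexGuard, not a Result as with std::sync::Mutex
    assert_eq!(*m.lock(), 1);

    let rw = RwLock::new(Vec::<u32>::new());
    rw.write().push(7);
    assert_eq!(rw.read().len(), 1);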

Change-Id: I391fc21b72414d6761b766d985fa6a001626d1cb
diff --git a/rustc_deps/Cargo.lock b/rustc_deps/Cargo.lock
index c8f224d..3e7ec90 100644
--- a/rustc_deps/Cargo.lock
+++ b/rustc_deps/Cargo.lock
@@ -418,7 +418,7 @@
  "nom 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "num 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-bigint 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "pin-utils 0.1.0-alpha.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "pretty_assertions 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -674,6 +674,15 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "lock_api"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "log"
 version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -857,7 +866,7 @@
 
 [[package]]
 name = "owning_ref"
-version = "0.3.3"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -865,20 +874,21 @@
 
 [[package]]
 name = "parking_lot"
-version = "0.4.8"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "parking_lot_core"
-version = "0.2.14"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.6",
 ]
@@ -1106,6 +1116,14 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "rustc_version"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "ryu"
 version = "0.2.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1129,6 +1147,24 @@
 ]
 
 [[package]]
+name = "scopeguard"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "semver"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "semver-parser"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "serde"
 version = "1.0.80"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1679,6 +1715,7 @@
 "checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
 "checksum lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a374c89b9db55895453a74c1e38861d9deec0b01b405a82516e9d5de4820dea1"
 "checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d"
+"checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c"
 "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
 "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
 "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
@@ -1699,9 +1736,9 @@
 "checksum num-rational 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4e96f040177bb3da242b5b1ecf3f54b5d5af3efbbfb18608977a5d2767b22f10"
 "checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
 "checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30"
-"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
-"checksum parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "149d8f5b97f3c1133e3cfcd8886449959e856b557ff281e292b733d7c69e005e"
-"checksum parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa"
+"checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13"
+"checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5"
+"checksum parking_lot_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad7f7e6ebdc79edff6fdcb87a55b620174f7a989e3eb31b65231f4af57f00b8c"
 "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831"
 "checksum phf 0.7.23 (registry+https://github.com/rust-lang/crates.io-index)" = "cec29da322b242f4c3098852c77a0ca261c9c01b806cae85a5572a1eb94db9a6"
 "checksum phf_codegen 0.7.23 (registry+https://github.com/rust-lang/crates.io-index)" = "7d187f00cd98d5afbcd8898f6cf181743a449162aeb329dcd2f3849009e605ad"
@@ -1726,10 +1763,14 @@
 "checksum rouille 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0845b9c39ba772da769fe2aaa4d81bfd10695a7ea051d0510702260ff4159841"
 "checksum rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "bcfe5b13211b4d78e5c2cadfebd7769197d95c639c35a50057eb4c05de811395"
 "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
+"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
 "checksum ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "eb9e9b8cde282a9fe6a42dd4681319bfb63f121b8a8ee9439c6f4107e58a46f7"
 "checksum safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e27a8b19b835f7aea908818e871f5cc3a5a186550c30773be987e155e8163d8f"
 "checksum safemem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8dca453248a96cb0749e36ccdfe2b0b4e54a61bfef89fb97ec621eb8e0a93dd9"
 "checksum same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8f20c4be53a8a1ff4c1f1b2bd14570d2f634628709752f0702ecdd2b3f9a5267"
+"checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27"
+"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
+"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
 "checksum serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "15c141fc7027dd265a47c090bf864cf62b42c4d228bbcf4e51a0c9e2b0d3f7ef"
 "checksum serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "225de307c6302bec3898c51ca302fc94a7a1697ef0845fcee6448f33c032249c"
 "checksum serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)" = "c37ccd6be3ed1fdf419ee848f7c758eb31b054d7cd3ae3600e3bae0adf569811"
diff --git a/rustc_deps/Cargo.toml b/rustc_deps/Cargo.toml
index 1336087..2e80302 100644
--- a/rustc_deps/Cargo.toml
+++ b/rustc_deps/Cargo.toml
@@ -35,7 +35,7 @@
 nom = "3.2"
 num = "0.2"
 num-bigint = { version = "0.2", features = ["rand"] }
-parking_lot = "0.4"
+parking_lot = "0.6"
 pin-utils = "=0.1.0-alpha.3"
 pretty_assertions = "0.5.1"
 rand = "0.5"
diff --git a/rustc_deps/vendor/lock_api/.cargo-checksum.json b/rustc_deps/vendor/lock_api/.cargo-checksum.json
new file mode 100644
index 0000000..b8be5a0
--- /dev/null
+++ b/rustc_deps/vendor/lock_api/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"ab2a7a96105e15de46900fb0da37edbab44e5513a9818672153dae44ed318f7e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"4a16128f58e3380b22b26b137ee1096732995b7e401f3d227dd7b0738b6bd604","src/mutex.rs":"fee397f72325621812c5f78c7a6b9369ea7ec14e71bb0049678a50349519c0c7","src/remutex.rs":"ed76d7b93a56b6248d79676de2aaa66b607b64f1b773c9dd7326b8324e2bc71a","src/rwlock.rs":"5ab1aab614358cfdaf23e8ff8a0ac5e0c7656b777f385aca2e5422f0aa8f0985"},"package":"62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/lock_api/Cargo.toml b/rustc_deps/vendor/lock_api/Cargo.toml
new file mode 100644
index 0000000..ee39d85
--- /dev/null
+++ b/rustc_deps/vendor/lock_api/Cargo.toml
@@ -0,0 +1,31 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "lock_api"
+version = "0.1.5"
+authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
+description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std."
+keywords = ["mutex", "rwlock", "lock", "no_std"]
+categories = ["concurrency", "no-std"]
+license = "Apache-2.0/MIT"
+repository = "https://github.com/Amanieu/parking_lot"
+[dependencies.owning_ref]
+version = "0.4"
+optional = true
+
+[dependencies.scopeguard]
+version = "0.3"
+default-features = false
+
+[features]
+nightly = []
diff --git a/rustc_deps/vendor/parking_lot_core/LICENSE b/rustc_deps/vendor/lock_api/LICENSE-APACHE
similarity index 88%
copy from rustc_deps/vendor/parking_lot_core/LICENSE
copy to rustc_deps/vendor/lock_api/LICENSE-APACHE
index 5a35ba9..16fe87b 100644
--- a/rustc_deps/vendor/parking_lot_core/LICENSE
+++ b/rustc_deps/vendor/lock_api/LICENSE-APACHE
@@ -1,5 +1,3 @@
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-APACHE:
-
                               Apache License
                         Version 2.0, January 2004
                      http://www.apache.org/licenses/
@@ -201,31 +199,3 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-========================================
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-MIT:
-
-Copyright (c) 2016 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/lock_api/LICENSE-MIT b/rustc_deps/vendor/lock_api/LICENSE-MIT
new file mode 100644
index 0000000..40b8817
--- /dev/null
+++ b/rustc_deps/vendor/lock_api/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/lock_api/src/lib.rs b/rustc_deps/vendor/lock_api/src/lib.rs
new file mode 100644
index 0000000..60271ed
--- /dev/null
+++ b/rustc_deps/vendor/lock_api/src/lib.rs
@@ -0,0 +1,109 @@
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! This library provides type-safe and fully-featured `Mutex` and `RwLock`
+//! types which wrap a simple raw mutex or rwlock type. This has several
+//! benefits: not only does it eliminate a large portion of the work in
+//! implementing custom lock types, it also allows users to write code which is
+//! generic over different lock implementations.
+//!
+//! Basic usage of this crate is very straightforward:
+//!
+//! 1. Create a raw lock type. This should only contain the lock state, not any
+//!    data protected by the lock.
+//! 2. Implement the `RawMutex` trait for your custom lock type.
+//! 3. Export your mutex as a type alias for `lock_api::Mutex`, and
+//!    your mutex guard as a type alias for `lock_api::MutexGuard`.
+//!    See the [example](#example) below for details.
+//!
+//! This process is similar for RwLocks, except that two guards need to be
+//! exported instead of one. (Or 3 guards if your type supports upgradable read
+//! locks, see [extension traits](#extension-traits) below for details)
+//!
+//! # Example
+//!
+//! ```
+//! use lock_api::{RawMutex, Mutex, GuardSend};
+//! use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
+//!
+//! // 1. Define our raw lock type
+//! pub struct RawSpinlock(AtomicBool);
+//!
+//! // 2. Implement RawMutex for this type
+//! unsafe impl RawMutex for RawSpinlock {
+//!     const INIT: RawSpinlock = RawSpinlock(ATOMIC_BOOL_INIT);
+//!
+//!     // A spinlock guard can be sent to another thread and unlocked there
+//!     type GuardMarker = GuardSend;
+//!
+//!     fn lock(&self) {
+//!         // Note: This isn't the best way of implementing a spinlock, but it
+//!         // suffices for the sake of this example.
+//!         while !self.try_lock() {}
+//!     }
+//!
+//!     fn try_lock(&self) -> bool {
+//!         // `swap` returns the previous value, so the lock was acquired
+//!         // only if the flag was previously `false`.
+//!         !self.0.swap(true, Ordering::Acquire)
+//!     }
+//!
+//!     fn unlock(&self) {
+//!         self.0.store(false, Ordering::Release);
+//!     }
+//! }
+//!
+//! // 3. Export the wrappers. These are the types that your users will actually use.
+//! pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;
+//! pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>;
+//! ```
+//!
+//! # Extension traits
+//!
+//! In addition to basic locking & unlocking functionality, you have the option
+//! of exposing additional functionality in your lock types by implementing
+//! additional traits for it. Examples of extension features include:
+//!
+//! - Fair unlocking (`RawMutexFair`, `RawRwLockFair`)
+//! - Lock timeouts (`RawMutexTimed`, `RawRwLockTimed`)
+//! - Downgradable write locks (`RawRwLockDowngradable`)
+//! - Recursive read locks (`RawRwLockRecursive`)
+//! - Upgradable read locks (`RawRwLockUpgrade`)
+//!
+//! The `Mutex` and `RwLock` wrappers will automatically expose this additional
+//! functionality if the raw lock type implements these extension traits.
+//!
+//! # Cargo features
+//!
+//! This crate supports two cargo features:
+//!
+//! - `owning_ref`: Allows your lock types to be used with the `owning_ref` crate.
+//! - `nightly`: Enables nightly-only features. At the moment the only such
+//!   feature is `const fn` constructors for lock types.
+
+#![no_std]
+#![warn(missing_docs)]
+#![cfg_attr(feature = "nightly", feature(const_fn))]
+
+#[macro_use]
+extern crate scopeguard;
+
+#[cfg(feature = "owning_ref")]
+extern crate owning_ref;
+
+/// Marker type which indicates that the Guard type for a lock is `Send`.
+pub struct GuardSend(());
+
+/// Marker type which indicates that the Guard type for a lock is not `Send`.
+pub struct GuardNoSend(*mut ());
+
+mod mutex;
+pub use mutex::*;
+
+mod remutex;
+pub use remutex::*;
+
+mod rwlock;
+pub use rwlock::*;
diff --git a/rustc_deps/vendor/lock_api/src/mutex.rs b/rustc_deps/vendor/lock_api/src/mutex.rs
new file mode 100644
index 0000000..b9cf49b
--- /dev/null
+++ b/rustc_deps/vendor/lock_api/src/mutex.rs
@@ -0,0 +1,550 @@
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+/// Basic operations for a mutex.
+///
+/// Types implementing this trait can be used by `Mutex` to form a safe and
+/// fully-functioning mutex type.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that the mutex is actually
+/// exclusive: a lock can't be acquired while the mutex is already locked.
+pub unsafe trait RawMutex {
+    /// Initial value for an unlocked mutex.
+    const INIT: Self;
+
+    /// Marker type which determines whether a lock guard should be `Send`. Use
+    /// one of the `GuardSend` or `GuardNoSend` helper types here.
+    type GuardMarker;
+
+    /// Acquires this mutex, blocking the current thread until it is able to do so.
+    fn lock(&self);
+
+    /// Attempts to acquire this mutex without blocking.
+    fn try_lock(&self) -> bool;
+
+    /// Unlocks this mutex.
+    fn unlock(&self);
+}
+
+/// Additional methods for mutexes which support fair unlocking.
+///
+/// Fair unlocking means that a lock is handed directly over to the next waiting
+/// thread if there is one, without giving other threads the opportunity to
+/// "steal" the lock in the meantime. This is typically slower than unfair
+/// unlocking, but may be necessary in certain circumstances.
+pub unsafe trait RawMutexFair: RawMutex {
+    /// Unlocks this mutex using a fair unlock protocol.
+    fn unlock_fair(&self);
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    fn bump(&self) {
+        self.unlock_fair();
+        self.lock();
+    }
+}
+
+/// Additional methods for mutexes which support locking with timeouts.
+///
+/// The `Duration` and `Instant` types are specified as associated types so that
+/// this trait is usable even in `no_std` environments.
+pub unsafe trait RawMutexTimed: RawMutex {
+    /// Duration type used for `try_lock_for`.
+    type Duration;
+
+    /// Instant type used for `try_lock_until`.
+    type Instant;
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    fn try_lock_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    fn try_lock_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// A mutual exclusion primitive useful for protecting shared data
+///
+/// This mutex will block threads waiting for the lock to become available. The
+/// mutex can also be statically initialized or created via a `new`
+/// constructor. Each mutex has a type parameter which represents the data that
+/// it is protecting. The data can only be accessed through the RAII guards
+/// returned from `lock` and `try_lock`, which guarantees that the data is only
+/// ever accessed when the mutex is locked.
+pub struct Mutex<R: RawMutex, T: ?Sized> {
+    raw: R,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<R: RawMutex + Send, T: ?Sized + Send> Send for Mutex<R, T> {}
+unsafe impl<R: RawMutex + Sync, T: ?Sized + Send> Sync for Mutex<R, T> {}
+
+impl<R: RawMutex, T> Mutex<R, T> {
+    /// Creates a new mutex in an unlocked state ready for use.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> Mutex<R, T> {
+        Mutex {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Creates a new mutex in an unlocked state ready for use.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> Mutex<R, T> {
+        Mutex {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    #[inline]
+    #[allow(unused_unsafe)]
+    pub fn into_inner(self) -> T {
+        unsafe { self.data.into_inner() }
+    }
+}
+
+impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
+    #[inline]
+    fn guard(&self) -> MutexGuard<R, T> {
+        MutexGuard {
+            mutex: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Acquires a mutex, blocking the current thread until it is able to do so.
+    ///
+    /// This function will block the local thread until it is available to acquire
+    /// the mutex. Upon returning, the thread is the only thread with the mutex
+    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
+    /// the guard goes out of scope, the mutex will be unlocked.
+    ///
+    /// Attempts to lock a mutex in the thread which already holds the lock will
+    /// result in a deadlock.
+    #[inline]
+    pub fn lock(&self) -> MutexGuard<R, T> {
+        self.raw.lock();
+        self.guard()
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+    /// guard is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_lock(&self) -> Option<MutexGuard<R, T>> {
+        if self.raw.try_lock() {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Forcibly unlocks the mutex.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `MutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `MutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock(&self) {
+        self.raw.unlock();
+    }
+
+    /// Returns the underlying raw mutex object.
+    ///
+    /// Note that you will most likely need to import the `RawMutex` trait from
+    /// `lock_api` to be able to call functions on the raw mutex.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking a mutex while
+    /// still holding a reference to a `MutexGuard`.
+    #[inline]
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw
+    }
+}
+
+impl<R: RawMutexFair, T: ?Sized> Mutex<R, T> {
+    /// Forcibly unlocks the mutex using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `MutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `MutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock_fair(&self) {
+        self.raw.unlock_fair();
+    }
+}
+
+impl<R: RawMutexTimed, T: ?Sized> Mutex<R, T> {
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<MutexGuard<R, T>> {
+        if self.raw.try_lock_for(timeout) {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<MutexGuard<R, T>> {
+        if self.raw.try_lock_until(timeout) {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawMutex, T: ?Sized + Default> Default for Mutex<R, T> {
+    #[inline]
+    fn default() -> Mutex<R, T> {
+        Mutex::new(Default::default())
+    }
+}
+
+impl<R: RawMutex, T> From<T> for Mutex<R, T> {
+    #[inline]
+    fn from(t: T) -> Mutex<R, T> {
+        Mutex::new(t)
+    }
+}
+
+impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
+            None => f.pad("Mutex { <locked> }"),
+        }
+    }
+}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// `Deref` and `DerefMut` implementations.
+#[must_use]
+pub struct MutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
+    mutex: &'a Mutex<R, T>,
+    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, R, T> {}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
+    /// Returns a reference to the original `Mutex` object.
+    pub fn mutex(s: &Self) -> &'a Mutex<R, T> {
+        s.mutex
+    }
+
+    /// Makes a new `MappedMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedMutexGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = &s.mutex.raw;
+        let data = f(unsafe { &mut *s.mutex.data.get() });
+        mem::forget(s);
+        MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedMutexGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MutexGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedMutexGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+    {
+        let raw = &s.mutex.raw;
+        let data = match f(unsafe { &mut *s.mutex.data.get() }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.mutex.raw.unlock();
+        defer!(s.mutex.raw.lock());
+        f()
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.mutex.raw.unlock_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// The mutex is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.mutex.raw.unlock_fair();
+        defer!(s.mutex.raw.lock());
+        f()
+    }
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.mutex.raw.bump();
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MutexGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.mutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.mutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.mutex.raw.unlock();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'a, R, T> {}
+
+/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use]
+pub struct MappedMutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
+    raw: &'a R,
+    data: *mut T,
+    marker: PhantomData<&'a mut T>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedMutexGuard<'a, R, T>
+{}
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Send for MappedMutexGuard<'a, R, T> where
+    R::GuardMarker: Send
+{}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
+    /// Makes a new `MappedMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedMutexGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &mut *s.data });
+        mem::forget(s);
+        MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedMutexGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedMutexGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedMutexGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+    {
+        let raw = s.raw;
+        let data = match f(unsafe { &mut *s.data }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MappedMutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.raw.unlock_fair();
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MappedMutexGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MappedMutexGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MappedMutexGuard<'a, R, T> {}
diff --git a/rustc_deps/vendor/lock_api/src/remutex.rs b/rustc_deps/vendor/lock_api/src/remutex.rs
new file mode 100644
index 0000000..3db9f51
--- /dev/null
+++ b/rustc_deps/vendor/lock_api/src/remutex.rs
@@ -0,0 +1,618 @@
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cell::{Cell, UnsafeCell};
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::Deref;
+use core::sync::atomic::{AtomicUsize, Ordering};
+use mutex::{RawMutex, RawMutexFair, RawMutexTimed};
+use GuardNoSend;
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+/// Helper trait which returns a non-zero thread ID.
+///
+/// The simplest way to implement this trait is to return the address of a
+/// thread-local variable.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that no two active threads share
+/// the same thread ID. However the ID of a thread that has exited can be
+/// re-used since that thread is no longer active.
+pub unsafe trait GetThreadId {
+    /// Initial value.
+    const INIT: Self;
+
+    /// Returns a non-zero thread ID which identifies the current thread of
+    /// execution.
+    fn nonzero_thread_id(&self) -> usize;
+}
+
+struct RawReentrantMutex<R: RawMutex, G: GetThreadId> {
+    owner: AtomicUsize,
+    lock_count: Cell<usize>,
+    mutex: R,
+    get_thread_id: G,
+}
+
+impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
+    #[inline]
+    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
+        let id = self.get_thread_id.nonzero_thread_id();
+        if self.owner.load(Ordering::Relaxed) == id {
+            self.lock_count.set(
+                self.lock_count
+                    .get()
+                    .checked_add(1)
+                    .expect("ReentrantMutex lock count overflow"),
+            );
+        } else {
+            if !try_lock() {
+                return false;
+            }
+            self.owner.store(id, Ordering::Relaxed);
+            self.lock_count.set(1);
+        }
+        true
+    }
+
+    #[inline]
+    fn lock(&self) {
+        self.lock_internal(|| {
+            self.mutex.lock();
+            true
+        });
+    }
+
+    #[inline]
+    fn try_lock(&self) -> bool {
+        self.lock_internal(|| self.mutex.try_lock())
+    }
+
+    #[inline]
+    fn unlock(&self) {
+        let lock_count = self.lock_count.get() - 1;
+        if lock_count == 0 {
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.unlock();
+        } else {
+            self.lock_count.set(lock_count);
+        }
+    }
+}
+
+impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
+    #[inline]
+    fn unlock_fair(&self) {
+        let lock_count = self.lock_count.get() - 1;
+        if lock_count == 0 {
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.unlock_fair();
+        } else {
+            self.lock_count.set(lock_count);
+        }
+    }
+
+    #[inline]
+    fn bump(&self) {
+        if self.lock_count.get() == 1 {
+            let id = self.owner.load(Ordering::Relaxed);
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.bump();
+            self.owner.store(id, Ordering::Relaxed);
+        }
+    }
+}
+
+impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
+    #[inline]
+    fn try_lock_until(&self, timeout: R::Instant) -> bool {
+        self.lock_internal(|| self.mutex.try_lock_until(timeout))
+    }
+
+    #[inline]
+    fn try_lock_for(&self, timeout: R::Duration) -> bool {
+        self.lock_internal(|| self.mutex.try_lock_for(timeout))
+    }
+}
+
+/// A mutex which can be recursively locked by a single thread.
+///
+/// This type is identical to `Mutex` except for the following points:
+///
+/// - Locking multiple times from the same thread will work correctly instead of
+///   deadlocking.
+/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
+///   Use a `RefCell` if you need this.
+///
+/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
+/// primitive.
+pub struct ReentrantMutex<R: RawMutex, G: GetThreadId, T: ?Sized> {
+    raw: RawReentrantMutex<R, G>,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
+    for ReentrantMutex<R, G, T>
+{}
+unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
+    for ReentrantMutex<R, G, T>
+{}
+
+impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
+    /// Creates a new reentrant mutex in an unlocked state ready for use.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex {
+            data: UnsafeCell::new(val),
+            raw: RawReentrantMutex {
+                owner: AtomicUsize::new(0),
+                lock_count: Cell::new(0),
+                mutex: R::INIT,
+                get_thread_id: G::INIT,
+            },
+        }
+    }
+
+    /// Creates a new reentrant mutex in an unlocked state ready for use.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex {
+            data: UnsafeCell::new(val),
+            raw: RawReentrantMutex {
+                owner: AtomicUsize::new(0),
+                lock_count: Cell::new(0),
+                mutex: R::INIT,
+                get_thread_id: G::INIT,
+            },
+        }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    #[inline]
+    #[allow(unused_unsafe)]
+    pub fn into_inner(self) -> T {
+        unsafe { self.data.into_inner() }
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    #[inline]
+    fn guard(&self) -> ReentrantMutexGuard<R, G, T> {
+        ReentrantMutexGuard {
+            remutex: &self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Acquires a reentrant mutex, blocking the current thread until it is able
+    /// to do so.
+    ///
+    /// If the mutex is held by another thread then this function will block the
+    /// local thread until it is available to acquire the mutex. If the mutex is
+    /// already held by the current thread then this function will increment the
+    /// lock reference count and return immediately. Upon returning,
+    /// the thread is the only thread with the mutex held. An RAII guard is
+    /// returned to allow scoped unlock of the lock. When the guard goes out of
+    /// scope, the mutex will be unlocked.
+    #[inline]
+    pub fn lock(&self) -> ReentrantMutexGuard<R, G, T> {
+        self.raw.lock();
+        self.guard()
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+    /// guard is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<R, G, T>> {
+        if self.raw.try_lock() {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Forcibly unlocks the mutex.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock(&self) {
+        self.raw.unlock();
+    }
+
+    /// Returns the underlying raw mutex object.
+    ///
+    /// Note that you will most likely need to import the `RawMutex` trait from
+    /// `lock_api` to be able to call functions on the raw mutex.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking a mutex while
+    /// still holding a reference to a `ReentrantMutexGuard`.
+    #[inline]
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw.mutex
+    }
+}
+
+impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    /// Forcibly unlocks the mutex using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock_fair(&self) {
+        self.raw.unlock_fair();
+    }
+}
+
+impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<R, G, T>> {
+        if self.raw.try_lock_for(timeout) {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<R, G, T>> {
+        if self.raw.try_lock_until(timeout) {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
+    #[inline]
+    fn default() -> ReentrantMutex<R, G, T> {
+        ReentrantMutex::new(Default::default())
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
+    #[inline]
+    fn from(t: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex::new(t)
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f
+                .debug_struct("ReentrantMutex")
+                .field("data", &&*guard)
+                .finish(),
+            None => f.pad("ReentrantMutex { <locked> }"),
+        }
+    }
+}
+
+/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
+/// is dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// `Deref` implementation.
+#[must_use]
+pub struct ReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
+    remutex: &'a ReentrantMutex<R, G, T>,
+    marker: PhantomData<(&'a T, GuardNoSend)>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for ReentrantMutexGuard<'a, R, G, T>
+{}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
+    /// Returns a reference to the original `ReentrantMutex` object.
+    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
+        s.remutex
+    }
+
+    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `ReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = &s.remutex.raw;
+        let data = f(unsafe { &*s.remutex.data.get() });
+        mem::forget(s);
+        MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+    {
+        let raw = &s.remutex.raw;
+        let data = match f(unsafe { &*s.remutex.data.get() }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.remutex.raw.unlock();
+        defer!(s.remutex.raw.lock());
+        f()
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    ReentrantMutexGuard<'a, R, G, T>
+{
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.remutex.raw.unlock_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// The mutex is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.remutex.raw.unlock_fair();
+        defer!(s.remutex.raw.lock());
+        f()
+    }
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.remutex.raw.bump();
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.remutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    #[inline]
+    fn drop(&mut self) {
+        self.remutex.raw.unlock();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
+    for ReentrantMutexGuard<'a, R, G, T>
+{}
+
+/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use]
+pub struct MappedReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
+    raw: &'a RawReentrantMutex<R, G>,
+    data: *const T,
+    marker: PhantomData<&'a T>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    MappedReentrantMutexGuard<'a, R, G, T>
+{
+    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &*s.data });
+        mem::forget(s);
+        MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+    {
+        let raw = s.raw;
+        let data = match f(unsafe { &*s.data }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    MappedReentrantMutexGuard<'a, R, G, T>
+{
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.raw.unlock_fair();
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{}
diff --git a/rustc_deps/vendor/lock_api/src/rwlock.rs b/rustc_deps/vendor/lock_api/src/rwlock.rs
new file mode 100644
index 0000000..4edb917
--- /dev/null
+++ b/rustc_deps/vendor/lock_api/src/rwlock.rs
@@ -0,0 +1,1453 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+/// Basic operations for a reader-writer lock.
+///
+/// Types implementing this trait can be used by `RwLock` to form a safe and
+/// fully-functioning `RwLock` type.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that the `RwLock` is actually
+/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
+/// lock exists, and a shared lock can't be acquired while an exclusive lock
+/// exists.
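+///
+/// # Example
+///
+/// A minimal, illustrative spinlock-based implementation (a sketch only;
+/// real implementations such as `parking_lot` park waiting threads instead
+/// of spinning):
+///
+/// ```ignore
+/// use std::sync::atomic::{AtomicUsize, Ordering};
+/// use lock_api::{GuardSend, RawRwLock};
+///
+/// // 0 = unlocked, usize::MAX = write-locked, n = n active readers.
+/// const WRITER: usize = usize::max_value();
+///
+/// struct RawSpinRwLock {
+///     state: AtomicUsize,
+/// }
+///
+/// unsafe impl RawRwLock for RawSpinRwLock {
+///     const INIT: Self = RawSpinRwLock { state: AtomicUsize::new(0) };
+///     type GuardMarker = GuardSend;
+///
+///     fn lock_shared(&self) {
+///         while !self.try_lock_shared() {}
+///     }
+///
+///     fn try_lock_shared(&self) -> bool {
+///         let s = self.state.load(Ordering::Relaxed);
+///         if s >= WRITER - 1 {
+///             // Write-locked, or the reader count would overflow.
+///             return false;
+///         }
+///         self.state
+///             .compare_exchange(s, s + 1, Ordering::Acquire, Ordering::Relaxed)
+///             .is_ok()
+///     }
+///
+///     fn unlock_shared(&self) {
+///         self.state.fetch_sub(1, Ordering::Release);
+///     }
+///
+///     fn lock_exclusive(&self) {
+///         while !self.try_lock_exclusive() {}
+///     }
+///
+///     fn try_lock_exclusive(&self) -> bool {
+///         self.state
+///             .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
+///             .is_ok()
+///     }
+///
+///     fn unlock_exclusive(&self) {
+///         self.state.store(0, Ordering::Release);
+///     }
+/// }
+/// ```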
+pub unsafe trait RawRwLock {
+    /// Initial value for an unlocked `RwLock`.
+    const INIT: Self;
+
+    /// Marker type which determines whether a lock guard should be `Send`. Use
+    /// one of the `GuardSend` or `GuardNoSend` helper types here.
+    type GuardMarker;
+
+    /// Acquires a shared lock, blocking the current thread until it is able to do so.
+    fn lock_shared(&self);
+
+    /// Attempts to acquire a shared lock without blocking.
+    fn try_lock_shared(&self) -> bool;
+
+    /// Releases a shared lock.
+    fn unlock_shared(&self);
+
+    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
+    fn lock_exclusive(&self);
+
+    /// Attempts to acquire an exclusive lock without blocking.
+    fn try_lock_exclusive(&self) -> bool;
+
+    /// Releases an exclusive lock.
+    fn unlock_exclusive(&self);
+}
+
+/// Additional methods for RwLocks which support fair unlocking.
+///
+/// Fair unlocking means that a lock is handed directly over to the next waiting
+/// thread if there is one, without giving other threads the opportunity to
+/// "steal" the lock in the meantime. This is typically slower than unfair
+/// unlocking, but may be necessary in certain circumstances.
+pub unsafe trait RawRwLockFair: RawRwLock {
+    /// Releases a shared lock using a fair unlock protocol.
+    fn unlock_shared_fair(&self);
+
+    /// Releases an exclusive lock using a fair unlock protocol.
+    fn unlock_exclusive_fair(&self);
+
+    /// Temporarily yields a shared lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
+    /// by `lock_shared`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    fn bump_shared(&self) {
+        self.unlock_shared_fair();
+        self.lock_shared();
+    }
+
+    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
+    /// by `lock_exclusive`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    fn bump_exclusive(&self) {
+        self.unlock_exclusive_fair();
+        self.lock_exclusive();
+    }
+}
+
+/// Additional methods for RwLocks which support atomically downgrading an
+/// exclusive lock to a shared lock.
+pub unsafe trait RawRwLockDowngrade: RawRwLock {
+    /// Atomically downgrades an exclusive lock into a shared lock without
+    /// allowing any thread to take an exclusive lock in the meantime.
+    fn downgrade(&self);
+}
+
+/// Additional methods for RwLocks which support locking with timeouts.
+///
+/// The `Duration` and `Instant` types are specified as associated types so that
+/// this trait is usable even in `no_std` environments.
+pub unsafe trait RawRwLockTimed: RawRwLock {
+    /// Duration type used for `try_lock_for`.
+    type Duration;
+
+    /// Instant type used for `try_lock_until`.
+    type Instant;
+
+    /// Attempts to acquire a shared lock until a timeout is reached.
+    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire a shared lock until a timeout is reached.
+    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;
+
+    /// Attempts to acquire an exclusive lock until a timeout is reached.
+    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire an exclusive lock until a timeout is reached.
+    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// Additional methods for RwLocks which support recursive read locks.
+///
+/// These methods are guaranteed to succeed without blocking if
+/// another read lock is held at the time of the call. This allows a thread
+/// to recursively lock a `RwLock`. However, using these methods can cause
+/// writers to starve since readers no longer block if a writer is waiting
+/// for the lock.
+pub unsafe trait RawRwLockRecursive: RawRwLock {
+    /// Acquires a shared lock without deadlocking in case of a recursive lock.
+    fn lock_shared_recursive(&self);
+
+    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive(&self) -> bool;
+}
+
+/// Additional methods for RwLocks which support recursive read locks and timeouts.
+pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
+    /// Attempts to acquire a shared lock until a timeout is reached, without
+    /// deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire a shared lock until a timeout is reached, without
+    /// deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// Additional methods for RwLocks which support atomically upgrading a shared
+/// lock to an exclusive lock.
+///
+/// This requires acquiring a special "upgradable read lock" instead of a
+/// normal shared lock. There may only be one upgradable lock at any time,
+/// otherwise deadlocks could occur when upgrading.
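+///
+/// # Example
+///
+/// An illustrative sketch, assuming the `parking_lot` crate (whose `RwLock`
+/// supports upgradable reads via these traits):
+///
+/// ```ignore
+/// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
+///
+/// let lock = RwLock::new(Vec::new());
+/// let guard = lock.upgradable_read();
+/// if guard.is_empty() {
+///     // Upgrade atomically: no writer can sneak in between the check
+///     // above and the write below.
+///     let mut writer = RwLockUpgradableReadGuard::upgrade(guard);
+///     writer.push(1);
+/// }
+/// ```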
+pub unsafe trait RawRwLockUpgrade: RawRwLock {
+    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
+    fn lock_upgradable(&self);
+
+    /// Attempts to acquire an upgradable lock without blocking.
+    fn try_lock_upgradable(&self) -> bool;
+
+    /// Releases an upgradable lock.
+    fn unlock_upgradable(&self);
+
+    /// Upgrades an upgradable lock to an exclusive lock.
+    fn upgrade(&self);
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock without
+    /// blocking.
+    fn try_upgrade(&self) -> bool;
+}
+
+/// Additional methods for RwLocks which support upgradable locks and fair
+/// unlocking.
+pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
+    /// Releases an upgradable lock using a fair unlock protocol.
+    fn unlock_upgradable_fair(&self);
+
+    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
+    /// by `lock_upgradable`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    fn bump_upgradable(&self) {
+        self.unlock_upgradable_fair();
+        self.lock_upgradable();
+    }
+}
+
+/// Additional methods for RwLocks which support upgradable locks and lock
+/// downgrading.
+pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
+    /// Downgrades an upgradable lock to a shared lock.
+    fn downgrade_upgradable(&self);
+
+    /// Downgrades an exclusive lock to an upgradable lock.
+    fn downgrade_to_upgradable(&self);
+}
+
+/// Additional methods for RwLocks which support upgradable locks and locking
+/// with timeouts.
+pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
+    /// Attempts to acquire an upgradable lock until a timeout is reached.
+    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire an upgradable lock until a timeout is reached.
+    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
+    /// timeout is reached.
+    fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
+    /// timeout is reached.
+    fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// A reader-writer lock
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
+/// allow concurrent access through readers. The RAII guards returned from the
+/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
+/// to allow access to the contents of the lock.
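+///
+/// # Example
+///
+/// A minimal usage sketch, assuming a concrete lock type such as the
+/// `RwLock` exported by the `parking_lot` crate:
+///
+/// ```ignore
+/// use parking_lot::RwLock;
+///
+/// let lock = RwLock::new(5);
+///
+/// // Any number of readers can share the lock at the same time.
+/// {
+///     let r1 = lock.read();
+///     let r2 = lock.read();
+///     assert_eq!(*r1 + *r2, 10);
+/// }
+///
+/// // Only one writer at a time, with exclusive access.
+/// {
+///     let mut w = lock.write();
+///     *w += 1;
+///     assert_eq!(*w, 6);
+/// }
+/// ```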
+pub struct RwLock<R: RawRwLock, T: ?Sized> {
+    raw: R,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
+unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}
+
+impl<R: RawRwLock, T> RwLock<R, T> {
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> RwLock<R, T> {
+        RwLock {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> RwLock<R, T> {
+        RwLock {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Consumes this `RwLock`, returning the underlying data.
+    #[inline]
+    #[allow(unused_unsafe)]
+    pub fn into_inner(self) -> T {
+        unsafe { self.data.into_inner() }
+    }
+}
+
+impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
+    #[inline]
+    fn read_guard(&self) -> RwLockReadGuard<R, T> {
+        RwLockReadGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    #[inline]
+    fn write_guard(&self) -> RwLockWriteGuard<R, T> {
+        RwLockWriteGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Locks this `RwLock` with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns.
+    ///
+    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
+    /// the current thread already holds one may result in a deadlock.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    #[inline]
+    pub fn read(&self) -> RwLockReadGuard<R, T> {
+        self.raw.lock_shared();
+        self.read_guard()
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_read(&self) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared() {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Locks this `RwLock` with exclusive write access, blocking the current
+    /// thread until it can be acquired.
+    ///
+    /// This function will not return while other writers or other readers
+    /// currently have access to the lock.
+    ///
+    /// Returns an RAII guard which will drop the write access of this `RwLock`
+    /// when dropped.
+    #[inline]
+    pub fn write(&self) -> RwLockWriteGuard<R, T> {
+        self.raw.lock_exclusive();
+        self.write_guard()
+    }
+
+    /// Attempts to lock this `RwLock` with exclusive write access.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the lock when
+    /// it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<R, T>> {
+        if self.raw.try_lock_exclusive() {
+            Some(self.write_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
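+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming a concrete `RwLock` such as `parking_lot`'s:
+    ///
+    /// ```ignore
+    /// let mut lock = RwLock::new(0);
+    /// // No locking: the `&mut self` borrow proves exclusive access.
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.read(), 10);
+    /// ```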
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Forcibly unlocks a read lock.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockReadGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
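+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch of the `mem::forget` pattern this is meant for:
+    ///
+    /// ```ignore
+    /// let lock = RwLock::new(0);
+    /// // Keep the lock read-locked with no guard object alive.
+    /// mem::forget(lock.read());
+    /// // ... later, e.g. after an FFI callback completes:
+    /// unsafe { lock.force_unlock_read() };
+    /// ```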
+    #[inline]
+    pub unsafe fn force_unlock_read(&self) {
+        self.raw.unlock_shared();
+    }
+
+    /// Forcibly unlocks a write lock.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
+    #[inline]
+    pub unsafe fn force_unlock_write(&self) {
+        self.raw.unlock_exclusive();
+    }
+
+    /// Returns the underlying raw reader-writer lock object.
+    ///
+    /// Note that you will most likely need to import the `RawRwLock` trait from
+    /// `lock_api` to be able to call functions on the raw
+    /// reader-writer lock.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking the lock while
+    /// still holding a reference to a lock guard.
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw
+    }
+}
+
+impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
+    /// Forcibly unlocks a read lock using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockReadGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
+    #[inline]
+    pub unsafe fn force_unlock_read_fair(&self) {
+        self.raw.unlock_shared_fair();
+    }
+
+    /// Forcibly unlocks a write lock using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
+    #[inline]
+    pub unsafe fn force_unlock_write_fair(&self) {
+        self.raw.unlock_exclusive_fair();
+    }
+}
+
+impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
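+    ///
+    /// # Example
+    ///
+    /// A sketch using `std::time::Duration` as the timeout type (as
+    /// `parking_lot` does):
+    ///
+    /// ```ignore
+    /// use std::time::Duration;
+    ///
+    /// let lock = RwLock::new(0);
+    /// if let Some(_guard) = lock.try_read_for(Duration::from_millis(10)) {
+    ///     // Acquired the shared lock within 10ms.
+    /// } else {
+    ///     // Timed out; a writer held the lock for too long.
+    /// }
+    /// ```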
+    #[inline]
+    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_for(timeout) {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_until(timeout) {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with exclusive write access until a
+    /// timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the exclusive access when it is dropped.
+    #[inline]
+    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<R, T>> {
+        if self.raw.try_lock_exclusive_for(timeout) {
+            Some(self.write_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with exclusive write access until a
+    /// timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the exclusive access when it is dropped.
+    #[inline]
+    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<R, T>> {
+        if self.raw.try_lock_exclusive_until(timeout) {
+            Some(self.write_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
+    /// Locks this `RwLock` with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns.
+    ///
+    /// Unlike `read`, this method is guaranteed to succeed without blocking if
+    /// another read lock is held at the time of the call. This allows a thread
+    /// to recursively lock a `RwLock`. However, using this method can cause
+    /// writers to starve since readers no longer block if a writer is waiting
+    /// for the lock.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
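+    ///
+    /// # Example
+    ///
+    /// A sketch of recursive read locking, assuming a lock type whose raw
+    /// lock implements `RawRwLockRecursive` (e.g. `parking_lot`'s):
+    ///
+    /// ```ignore
+    /// let lock = RwLock::new(1);
+    /// let first = lock.read_recursive();
+    /// // Unlike `read`, this cannot block even if a writer is waiting.
+    /// let second = lock.read_recursive();
+    /// assert_eq!(*first, *second);
+    /// ```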
+    #[inline]
+    pub fn read_recursive(&self) -> RwLockReadGuard<R, T> {
+        self.raw.lock_shared_recursive();
+        self.read_guard()
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This method is guaranteed to succeed if another read lock is held at the
+    /// time of the call. See the documentation for `read_recursive` for details.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_recursive() {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    ///
+    /// This method is guaranteed to succeed without blocking if another read
+    /// lock is held at the time of the call. See the documentation for
+    /// `read_recursive` for details.
+    #[inline]
+    pub fn try_read_recursive_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_recursive_for(timeout) {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_read_recursive_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_recursive_until(timeout) {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
+    #[inline]
+    fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<R, T> {
+        RwLockUpgradableReadGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Locks this `RwLock` with upgradable read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers or other
+    /// upgradable reads which hold the lock. There may be other readers currently
+    /// inside the lock when this method returns.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    #[inline]
+    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<R, T> {
+        self.raw.lock_upgradable();
+        self.upgradable_guard()
+    }
+
+    /// Attempts to acquire this `RwLock` with upgradable read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<R, T>> {
+        if self.raw.try_lock_upgradable() {
+            Some(self.upgradable_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_upgradable_read_for(
+        &self,
+        timeout: R::Duration,
+    ) -> Option<RwLockUpgradableReadGuard<R, T>> {
+        if self.raw.try_lock_upgradable_for(timeout) {
+            Some(self.upgradable_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_upgradable_read_until(
+        &self,
+        timeout: R::Instant,
+    ) -> Option<RwLockUpgradableReadGuard<R, T>> {
+        if self.raw.try_lock_upgradable_until(timeout) {
+            Some(self.upgradable_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
+    #[inline]
+    fn default() -> RwLock<R, T> {
+        RwLock::new(Default::default())
+    }
+}
+
+impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
+    #[inline]
+    fn from(t: T) -> RwLock<R, T> {
+        RwLock::new(t)
+    }
+}
+
+impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_read() {
+            Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
+            None => f.pad("RwLock { <locked> }"),
+        }
+    }
+}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+#[must_use]
+pub struct RwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockReadGuard<'a, R, T> {}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Makes a new `MappedRwLockReadGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `RwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
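+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch, assuming a concrete `RwLock`:
+    ///
+    /// ```ignore
+    /// let lock = RwLock::new((1, 2));
+    /// let guard = lock.read();
+    /// // The mapped guard borrows only the first field, but still holds
+    /// // the read lock on the whole `RwLock`.
+    /// let first = RwLockReadGuard::map(guard, |pair| &pair.0);
+    /// assert_eq!(*first, 1);
+    /// ```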
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = &s.rwlock.raw;
+        let data = f(unsafe { &*s.rwlock.data.get() });
+        mem::forget(s);
+        MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `RwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+    {
+        let raw = &s.rwlock.raw;
+        let data = match f(unsafe { &*s.rwlock.data.get() }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
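+    ///
+    /// # Example
+    ///
+    /// A sketch of running a closure with the lock temporarily released:
+    ///
+    /// ```ignore
+    /// let lock = RwLock::new(0);
+    /// let mut guard = lock.read();
+    /// RwLockReadGuard::unlocked(&mut guard, || {
+    ///     // The shared lock is released here and re-acquired when the
+    ///     // closure returns.
+    /// });
+    /// ```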
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_shared();
+        defer!(s.rwlock.raw.lock_shared());
+        f()
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.rwlock.raw.unlock_shared_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_shared_fair();
+        defer!(s.rwlock.raw.lock_shared());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `read`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.rwlock.raw.bump_shared();
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.rwlock.raw.unlock_shared();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+#[must_use]
+pub struct RwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockWriteGuard<'a, R, T> {}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Makes a new `MappedRwLockWriteGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `RwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = &s.rwlock.raw;
+        let data = f(unsafe { &mut *s.rwlock.data.get() });
+        mem::forget(s);
+        MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `RwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+    {
+        let raw = &s.rwlock.raw;
+        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_exclusive();
+        defer!(s.rwlock.raw.lock_exclusive());
+        f()
+    }
+}
+
+impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Atomically downgrades a write lock into a read lock without allowing any
+    /// writers to take exclusive access of the lock in the meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
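+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch, assuming a concrete `RwLock`:
+    ///
+    /// ```ignore
+    /// let lock = RwLock::new(1);
+    /// let writer = lock.write();
+    /// let reader = RwLockWriteGuard::downgrade(writer);
+    /// // Other readers may now share the lock, but writers remain
+    /// // excluded until `reader` is dropped.
+    /// assert_eq!(*reader, 1);
+    /// ```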
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
+        s.rwlock.raw.downgrade();
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
+    /// writers to take exclusive access of the lock in the meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
+        s.rwlock.raw.downgrade_to_upgradable();
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockUpgradableReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockWriteGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.rwlock.raw.unlock_exclusive_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_exclusive_fair();
+        defer!(s.rwlock.raw.lock_exclusive());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `write`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
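+    ///
+    /// # Example
+    ///
+    /// A sketch of yielding the lock between batches of work; `batches` and
+    /// `process` are hypothetical stand-ins for application logic:
+    ///
+    /// ```ignore
+    /// let mut guard = lock.write();
+    /// for batch in batches {
+    ///     process(batch, &mut *guard);
+    ///     // Let a waiting reader or writer run between batches.
+    ///     RwLockWriteGuard::bump(&mut guard);
+    /// }
+    /// ```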
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.rwlock.raw.bump_exclusive();
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.rwlock.raw.unlock_exclusive();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
+
+/// RAII structure used to release the upgradable read access of a lock when
+/// dropped.
+#[must_use]
+pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
+    for RwLockUpgradableReadGuard<'a, R, T>
+{}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_upgradable();
+        defer!(s.rwlock.raw.lock_upgradable());
+        f()
+    }
+
+    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
+    /// blocking the current thread until it can be acquired.
+    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
+        s.rwlock.raw.upgrade();
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockWriteGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
+    ///
+    /// If the access could not be granted at this time, then the current guard is returned.
+    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        if s.rwlock.raw.try_upgrade() {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.rwlock.raw.unlock_upgradable_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_upgradable_fair();
+        defer!(s.rwlock.raw.lock_upgradable());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `upgradable_read`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.rwlock.raw.bump_upgradable();
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Atomically downgrades an upgradable read lock into a shared read lock
+    /// without allowing any writers to take exclusive access of the lock in the
+    /// meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
+        s.rwlock.raw.downgrade_upgradable();
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive
+    /// write lock, until a timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// the current guard is returned.
+    pub fn try_upgrade_for(
+        s: Self,
+        timeout: R::Duration,
+    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        if s.rwlock.raw.try_upgrade_for(timeout) {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive
+    /// write lock, until a timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// the current guard is returned.
+    #[inline]
+    pub fn try_upgrade_until(
+        s: Self,
+        timeout: R::Instant,
+    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        if s.rwlock.raw.try_upgrade_until(timeout) {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.rwlock.raw.unlock_upgradable();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
+    for RwLockUpgradableReadGuard<'a, R, T>
+{}
+
+/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use]
+pub struct MappedRwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+    raw: &'a R,
+    data: *const T,
+    marker: PhantomData<&'a T>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
+    R::GuardMarker: Send
+{}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
+    /// Makes a new `MappedRwLockReadGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &*s.data });
+        mem::forget(s);
+        MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+    {
+        let raw = s.raw;
+        let data = match f(unsafe { &*s.data }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.raw.unlock_shared_fair();
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock_shared();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
+    for MappedRwLockReadGuard<'a, R, T>
+{}
+
+/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use]
+pub struct MappedRwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+    raw: &'a R,
+    data: *mut T,
+    marker: PhantomData<&'a mut T>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedRwLockWriteGuard<'a, R, T>
+{}
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
+    R::GuardMarker: Send
+{}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
+    /// Makes a new `MappedRwLockWriteGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &mut *s.data });
+        mem::forget(s);
+        MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+    {
+        let raw = s.raw;
+        let data = match f(unsafe { &mut *s.data }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+}
+
+impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
+    /// Atomically downgrades a write lock into a read lock without allowing any
+    /// writers to take exclusive access of the lock in the meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> {
+        s.raw.downgrade();
+        let raw = s.raw;
+        let data = s.data;
+        mem::forget(s);
+        MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.raw.unlock_exclusive_fair();
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock_exclusive();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
+    for MappedRwLockWriteGuard<'a, R, T>
+{}
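The mapped-guard API added above is reachable through `parking_lot`'s re-exported types. A minimal sketch of narrowing a write guard to one field of the protected data (variable names are illustrative):

```rust
use parking_lot::{RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new((0u32, String::from("hello")));

    // `map` is an associated function, so it cannot collide with methods on
    // the locked data; the lock stays write-locked until the mapped guard
    // is dropped.
    let mut name = RwLockWriteGuard::map(lock.write(), |pair| &mut pair.1);
    name.push_str(" world");
    drop(name); // releases the write lock

    assert_eq!(lock.read().1, "hello world");
}
```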
diff --git a/rustc_deps/vendor/owning_ref/.cargo-checksum.json b/rustc_deps/vendor/owning_ref/.cargo-checksum.json
index aeedd0a..636230f 100644
--- a/rustc_deps/vendor/owning_ref/.cargo-checksum.json
+++ b/rustc_deps/vendor/owning_ref/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"d6033830eecd6112eb61e775f848ab6c7ee76822ee1d7f8786b34e6e71f45b7b","LICENSE":"90bc15ed094593083fd129fdd1a03607be80fe8839c5564616a5961ab7f7a194","README.md":"e18ff10a148a8316e89bbe9f45cb57657170abe1a19154f8a5c968d529fe895e","src/lib.rs":"5d8857eee0de9863c3b6fce98732427cc1868a5ec3f0e3ce652ec011391aa842"},"package":"cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"}
\ No newline at end of file
+{"files":{"Cargo.toml":"9cc8936f2504375b24b88060b766a908e69690246cd063138b3a9b39287640d7","LICENSE":"90bc15ed094593083fd129fdd1a03607be80fe8839c5564616a5961ab7f7a194","README.md":"a479bb465878f3cbb8d53d2f42a26ac309e121d7c635956b7547a6bdfedce5d4","src/lib.rs":"e1e523638bea245fcf00b6643c61f856f2404f4d5d9ed0c230a6c4d2d8550b1c"},"package":"49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/owning_ref/Cargo.toml b/rustc_deps/vendor/owning_ref/Cargo.toml
index 24edb37..4ef64ea 100644
--- a/rustc_deps/vendor/owning_ref/Cargo.toml
+++ b/rustc_deps/vendor/owning_ref/Cargo.toml
@@ -1,15 +1,24 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
 [package]
 name = "owning_ref"
-version = "0.3.3"
+version = "0.4.0"
 authors = ["Marvin Löbel <loebel.marvin@gmail.com>"]
-license = "MIT"
-
 description = "A library for creating references that carry their owner with them."
-readme = "README.md"
 documentation = "http://kimundi.github.io/owning-ref-rs/owning_ref/index.html"
-
-repository = "https://github.com/Kimundi/owning-ref-rs"
+readme = "README.md"
 keywords = ["reference", "sibling", "field", "owning"]
-
-[dependencies]
-stable_deref_trait = "1.0.0"
+license = "MIT"
+repository = "https://github.com/Kimundi/owning-ref-rs"
+[dependencies.stable_deref_trait]
+version = "1.0.0"
diff --git a/rustc_deps/vendor/owning_ref/README.md b/rustc_deps/vendor/owning_ref/README.md
index c4c74f1..cd14b2e 100644
--- a/rustc_deps/vendor/owning_ref/README.md
+++ b/rustc_deps/vendor/owning_ref/README.md
@@ -4,7 +4,7 @@
 A library for creating references that carry their owner with them.
 
 This can sometimes be useful because Rust borrowing rules normally prevent
-moving a type that has been moved from. For example, this kind of code gets rejected:
+moving a type that has been borrowed from. For example, this kind of code gets rejected:
 
 ```rust
 fn return_owned_and_referenced<'a>() -> (Vec<u8>, &'a [u8]) {
diff --git a/rustc_deps/vendor/owning_ref/src/lib.rs b/rustc_deps/vendor/owning_ref/src/lib.rs
index 21ed086..7a29136 100644
--- a/rustc_deps/vendor/owning_ref/src/lib.rs
+++ b/rustc_deps/vendor/owning_ref/src/lib.rs
@@ -465,15 +465,13 @@
 
     // TODO: wrap_owner
 
-    // FIXME: Naming convention?
-    /// A getter for the underlying owner.
-    pub fn owner(&self) -> &O {
+    /// A reference to the underlying owner.
+    pub fn as_owner(&self) -> &O {
         &self.owner
     }
 
-    // FIXME: Naming convention?
     /// Discards the reference and retrieves the owner.
-    pub fn into_inner(self) -> O {
+    pub fn into_owner(self) -> O {
         self.owner
     }
 }
@@ -711,15 +709,18 @@
 
     // TODO: wrap_owner
 
-    // FIXME: Naming convention?
-    /// A getter for the underlying owner.
-    pub fn owner(&self) -> &O {
+    /// A reference to the underlying owner.
+    pub fn as_owner(&self) -> &O {
         &self.owner
     }
 
-    // FIXME: Naming convention?
+    /// A mutable reference to the underlying owner.
+    pub fn as_owner_mut(&mut self) -> &mut O {
+        &mut self.owner
+    }
+
     /// Discards the reference and retrieves the owner.
-    pub fn into_inner(self) -> O {
+    pub fn into_owner(self) -> O {
         self.owner
     }
 }
@@ -855,6 +856,16 @@
           _owner: o,
         })
     }
+
+    /// A getter for the underlying owner.
+    pub fn as_owner(&self) -> &O {
+        &self._owner
+    }
+
+    /// Discards the dependent object and returns the owner.
+    pub fn into_owner(self) -> O {
+        self._owner
+    }
 }
 
 /////////////////////////////////////////////////////////////////////////////
@@ -898,6 +909,8 @@
 
 unsafe impl<O, T: ?Sized> StableAddress for OwningRef<O, T> {}
 
+unsafe impl<O, T: ?Sized> StableAddress for OwningRefMut<O, T> {}
+
 impl<O, T: ?Sized> AsRef<T> for OwningRef<O, T> {
     fn as_ref(&self) -> &T {
         &*self
@@ -952,7 +965,7 @@
     }
 }
 
-// ^ FIXME: Is a Into impl for calling into_inner() possible as well?
+// ^ FIXME: Is an Into impl for calling into_owner() possible as well?
 
 impl<O, T: ?Sized> Debug for OwningRef<O, T>
     where O: Debug,
@@ -961,7 +974,7 @@
     fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
         write!(f,
                "OwningRef {{ owner: {:?}, reference: {:?} }}",
-               self.owner(),
+               self.as_owner(),
                &**self)
     }
 }
@@ -973,7 +986,7 @@
     fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
         write!(f,
                "OwningRefMut {{ owner: {:?}, reference: {:?} }}",
-               self.owner(),
+               self.as_owner(),
                &**self)
     }
 }
@@ -1119,7 +1132,7 @@
 /// Typedef of a mutable owning reference that uses a `MutexGuard` as the owner.
 pub type MutexGuardRefMut<'a, T, U = T> = OwningRefMut<MutexGuard<'a, T>, U>;
 /// Typedef of a mutable owning reference that uses a `RwLockWriteGuard` as the owner.
-pub type RwLockWriteGuardRefMut<'a, T, U = T> = OwningRef<RwLockWriteGuard<'a, T>, U>;
+pub type RwLockWriteGuardRefMut<'a, T, U = T> = OwningRefMut<RwLockWriteGuard<'a, T>, U>;
 
 unsafe impl<'a, T: 'a> IntoErased<'a> for Box<T> {
     type Erased = Box<Erased + 'a>;
@@ -1221,19 +1234,19 @@
         }
 
         #[test]
-        fn owner() {
+        fn as_owner() {
             let or: BoxRef<String> = Box::new(example().1).into();
             let or = or.map(|x| &x[..5]);
             assert_eq!(&*or, "hello");
-            assert_eq!(&**or.owner(), "hello world");
+            assert_eq!(&**or.as_owner(), "hello world");
         }
 
         #[test]
-        fn into_inner() {
+        fn into_owner() {
             let or: BoxRef<String> = Box::new(example().1).into();
             let or = or.map(|x| &x[..5]);
             assert_eq!(&*or, "hello");
-            let s = *or.into_inner();
+            let s = *or.into_owner();
             assert_eq!(&s, "hello world");
         }
 
@@ -1262,8 +1275,15 @@
             let foo = [413, 612];
             let bar = &foo;
 
+            // FIXME: lifetime inference fails us, and we can't easily define a lifetime for a closure
+            // (see https://github.com/rust-lang/rust/issues/22340)
+            // So we use a function to identify the lifetimes instead.
+            fn borrow<'a>(a: &'a &[i32; 2]) -> &'a i32 {
+                &a[0]
+            }
+
             let o: BoxRef<&[i32; 2]> = Box::new(bar).into();
-            let o: BoxRef<&[i32; 2], i32> = o.map(|a: &&[i32; 2]| &a[0]);
+            let o: BoxRef<&[i32; 2], i32> = o.map(borrow);
             let o: BoxRef<Erased, i32> = o.erase_owner();
 
             assert_eq!(*o, 413);
@@ -1651,19 +1671,19 @@
         }
 
         #[test]
-        fn owner() {
+        fn as_owner() {
             let or: BoxRefMut<String> = Box::new(example().1).into();
             let or = or.map_mut(|x| &mut x[..5]);
             assert_eq!(&*or, "hello");
-            assert_eq!(&**or.owner(), "hello world");
+            assert_eq!(&**or.as_owner(), "hello world");
         }
 
         #[test]
-        fn into_inner() {
+        fn into_owner() {
             let or: BoxRefMut<String> = Box::new(example().1).into();
             let or = or.map_mut(|x| &mut x[..5]);
             assert_eq!(&*or, "hello");
-            let s = *or.into_inner();
+            let s = *or.into_owner();
             assert_eq!(&s, "hello world");
         }
 
@@ -1693,8 +1713,15 @@
             let mut foo = [413, 612];
             let bar = &mut foo;
 
+            // FIXME: lifetime inference fails us, and we can't easily define a lifetime for a closure
+            // (see https://github.com/rust-lang/rust/issues/22340)
+            // So we use a function to identify the lifetimes instead.
+            fn borrow<'a>(a: &'a mut &mut [i32; 2]) -> &'a mut i32 {
+                &mut a[0]
+            }
+
             let o: BoxRefMut<&mut [i32; 2]> = Box::new(bar).into();
-            let o: BoxRefMut<&mut [i32; 2], i32> = o.map_mut(|a: &mut &mut [i32; 2]| &mut a[0]);
+            let o: BoxRefMut<&mut [i32; 2], i32> = o.map_mut(borrow);
             let o: BoxRefMut<Erased, i32> = o.erase_owner();
 
             assert_eq!(*o, 413);
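The `owner()`/`into_inner()` to `as_owner()`/`into_owner()` rename is the main API break for owning_ref 0.4 consumers. A small sketch mirroring the crate's own tests above:

```rust
use owning_ref::BoxRef;

fn main() {
    let or: BoxRef<String> = Box::new(String::from("hello world")).into();
    let or = or.map(|x| &x[..5]);
    assert_eq!(&*or, "hello");

    // `owner()` is now `as_owner()`, `into_inner()` is now `into_owner()`.
    assert_eq!(&**or.as_owner(), "hello world");
    let s = *or.into_owner();
    assert_eq!(&s, "hello world");
}
```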
diff --git a/rustc_deps/vendor/parking_lot/.cargo-checksum.json b/rustc_deps/vendor/parking_lot/.cargo-checksum.json
index 036e855..336edcf 100644
--- a/rustc_deps/vendor/parking_lot/.cargo-checksum.json
+++ b/rustc_deps/vendor/parking_lot/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"340557b3e58636c3cbc11ac4a9687e53a631fca6ec7330e75d8995416afeb98f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"c0c3ea87ba8f956451bd1749b1c74de5726c81243b6a4dbcbdfb9d2bb16a8350","appveyor.yml":"cfa9c3ae2476c879fe4240c306d45de6c2c04025212d8217fa76690888117594","src/condvar.rs":"41d10d6e4de01b63e2c5fbeb99dac4e600605b31c170e3baf5221cd999165e05","src/deadlock.rs":"ebeec23700e92e8f0634f3647bc90c88b2706540c6c74b389e928ef9c9c94045","src/elision.rs":"846eece6a6b4950f093fa186cbc4a1460f186e2542fa6b7730ecc42f5e10fced","src/lib.rs":"5c10332ef64885dc7a79ecb2ca8a2d004106e29a5b01e78c1cb51dd179a1bfed","src/mutex.rs":"908d426603acaca916f0f0c6b024f49af002fb1a621e6e745dfbc518152241f2","src/once.rs":"04ccd9b91a8fad3a9379f86cf5b24983a1b926f47cd302ca10e7d12ce1fffdab","src/raw_mutex.rs":"94567d066a0ede869a7e7cd38cba6cc98ca737224ae6969f1144dfaefcbbd836","src/raw_remutex.rs":"f17a26ff7c923e4bae118255852430eae91388c554f736951bcbad2d1ebd41a6","src/raw_rwlock.rs":"67b0809a9d9828f8d0ff841facdcf6e144fff8b7d2df227c4dc005d3fa480579","src/remutex.rs":"480c734aa5f70bb68b5ca34e513146ec7d31e795cbf61efce7b7f3d81de8338c","src/rwlock.rs":"75cde25f1949b974ebee8185c4c410cf2ae6e4ff4028bca43ce84ecd8aca74c6","src/stable.rs":"849374549a707a5238408e0aa30a2438babf817e4f30e338d61fbff297e64f1b","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84"},"package":"149d8f5b97f3c1133e3cfcd8886449959e856b557ff281e292b733d7c69e005e"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"e254fac6600c725edb746f31f41b1b2ceeb9cfc85f4f9a3e6af874c70b020823","Cargo.toml":"e5d073e0f98041047612bb2e226585f81fcb2d3eabef9579c9ce322f7708b38d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"a52cf38f796e7f12215662e8a3a23aa9802c170a09ecba0e4be766c88f95a9c5","appveyor.yml":"cb1d02316926d88e174976bfc6781194569ca27f386c50e3091d8e52587d30a2","src/condvar.rs":"ce127f75bad5c175abb8147aac4b5be78aabdb599c5f8f3aad77f6bc3705274d","src/deadlock.rs":"8916c2e2820bfd3a55860ddb9f1b907888406b68cdae2b7a2093c825d28f3b99","src/elision.rs":"89072fe0aca87d53abc0f56490ae77bcf9d77e28e291bd13e861b1924bbb079f","src/lib.rs":"3e259bf3421f10c3e920daca511a4880b2620145a1fcb070a37548835c4f429a","src/mutex.rs":"0ac3e654e4aa2c3078a6aa22c83428d604e7f3f8ed4c261c40d030d232ca7b64","src/once.rs":"606e0e88d6c1ff82b69bda56e7409ec3a1aefa66b45b7fa42b88cba07ae70598","src/raw_mutex.rs":"881e75a843d76399d01c4ae0f09cd23b93b137b5035a47bd7886505132e58165","src/raw_rwlock.rs":"2e3c13e80cd06be53118ae2bcc7bdec708dda8c139c371ee12885f48903cf69c","src/remutex.rs":"bad8022610344086010b0661998a416db4b458c222e671b67df03fc4795c0298","src/rwlock.rs":"fc826cbcf2d7862ecb184b657a82bb8794a9e26ac329c8f87b589fa09f15d245","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84"},"package":"f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/parking_lot/CHANGELOG.md b/rustc_deps/vendor/parking_lot/CHANGELOG.md
new file mode 100644
index 0000000..20651aa
--- /dev/null
+++ b/rustc_deps/vendor/parking_lot/CHANGELOG.md
@@ -0,0 +1,48 @@
+0.6.3 (2018-07-18)
+==================
+
+- Export `RawMutex`, `RawRwLock` and `RawThreadId`.
+
+0.6.2 (2018-06-18)
+==================
+
+- Enable `lock_api/nightly` feature from `parking_lot/nightly` (#79)
+
+0.6.1 (2018-06-08)
+==================
+
+Added missing typedefs for mapped lock guards:
+
+- `MappedMutexGuard`
+- `MappedReentrantMutexGuard`
+- `MappedRwLockReadGuard`
+- `MappedRwLockWriteGuard`
+
+0.6.0 (2018-06-08)
+==================
+
+This release moves most of the code for type-safe `Mutex` and `RwLock` types
+into a separate crate called `lock_api`. This new crate is compatible with
+`no_std` and provides type-safe `Mutex` and `RwLock` wrapper types built from a
+raw lock type which implements the `RawMutex` or `RawRwLock` trait. The API
+provided by the wrapper types can be extended by implementing more traits on the
+raw mutex type which provide more functionality (e.g. `RawMutexTimed`). See the
+crate documentation for more details.
+
+There are also several major changes:
+
+- The minimum required Rust version is bumped to 1.26.
+- All methods on `MutexGuard` (and other guard types) are no longer inherent
+  methods and must be called as `MutexGuard::method(self)`. This avoids
+  conflicts with methods from the inner type.
+- `MutexGuard` (and other guard types) add the `unlocked` method which
+  temporarily unlocks a mutex, runs the given closure, and then re-locks the
+  mutex.
+- `MutexGuard` (and other guard types) add the `bump` method which gives a
+  chance for other threads to acquire the mutex by temporarily unlocking it and
+  re-locking it. However this is optimized for the common case where there are
+  no threads waiting on the lock, in which case no unlocking is performed.
+- `MutexGuard` (and other guard types) add the `map` method which returns a
+  `MappedMutexGuard` which holds only a subset of the original locked type. The
+  `MappedMutexGuard` type is identical to `MutexGuard` except that it does not
+  support the `unlocked` and `bump` methods, and can't be used with `Condvar`.
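A short sketch of the two new guard operations the changelog describes, using the associated-function calling convention that 0.6 now requires (`MutexGuard::method(guard)` rather than `guard.method()`):

```rust
use parking_lot::{Mutex, MutexGuard};

fn main() {
    let m = Mutex::new(0u32);
    let mut guard = m.lock();
    *guard += 1;

    // Release the lock for the duration of the closure, then re-lock.
    MutexGuard::unlocked(&mut guard, || {
        // other threads may acquire `m` here
    });

    // Briefly hand the lock to a waiting thread, if any; a no-op otherwise.
    MutexGuard::bump(&mut guard);

    assert_eq!(*guard, 1);
}
```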
diff --git a/rustc_deps/vendor/parking_lot/Cargo.toml b/rustc_deps/vendor/parking_lot/Cargo.toml
index 8877f44..67765d1 100644
--- a/rustc_deps/vendor/parking_lot/Cargo.toml
+++ b/rustc_deps/vendor/parking_lot/Cargo.toml
@@ -12,24 +12,24 @@
 
 [package]
 name = "parking_lot"
-version = "0.4.8"
+version = "0.6.4"
 authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
 description = "More compact and efficient implementations of the standard synchronization primitives."
-documentation = "https://amanieu.github.io/parking_lot/parking_lot/index.html"
 readme = "README.md"
 keywords = ["mutex", "condvar", "rwlock", "once", "thread"]
+categories = ["concurrency"]
 license = "Apache-2.0/MIT"
 repository = "https://github.com/Amanieu/parking_lot"
-[dependencies.owning_ref]
-version = "0.3"
-optional = true
+[dependencies.lock_api]
+version = "0.1"
 
 [dependencies.parking_lot_core]
-version = "0.2"
-[dev-dependencies.rand]
 version = "0.3"
+[dev-dependencies.rand]
+version = "0.5"
 
 [features]
-nightly = ["parking_lot_core/nightly"]
-default = ["owning_ref"]
 deadlock_detection = ["parking_lot_core/deadlock_detection"]
+default = ["owning_ref"]
+nightly = ["parking_lot_core/nightly", "lock_api/nightly"]
+owning_ref = ["lock_api/owning_ref"]
diff --git a/rustc_deps/vendor/parking_lot/README.md b/rustc_deps/vendor/parking_lot/README.md
index 8e6c86b..4e9204c 100644
--- a/rustc_deps/vendor/parking_lot/README.md
+++ b/rustc_deps/vendor/parking_lot/README.md
@@ -3,9 +3,11 @@
 
 [![Build Status](https://travis-ci.org/Amanieu/parking_lot.svg?branch=master)](https://travis-ci.org/Amanieu/parking_lot) [![Build status](https://ci.appveyor.com/api/projects/status/wppcc32ttpud0a30/branch/master?svg=true)](https://ci.appveyor.com/project/Amanieu/parking-lot/branch/master) [![Crates.io](https://img.shields.io/crates/v/parking_lot.svg)](https://crates.io/crates/parking_lot)
 
-[Documentation (synchronization primitives)](https://amanieu.github.io/parking_lot/parking_lot/index.html)
+[Documentation (synchronization primitives)](https://docs.rs/parking_lot/)
 
-[Documentation (core parking lot API)](https://amanieu.github.io/parking_lot/parking_lot_core/index.html)
+[Documentation (core parking lot API)](https://docs.rs/parking_lot_core/)
+
+[Documentation (type-safe lock API)](https://docs.rs/lock_api/)
 
 This library provides implementations of `Mutex`, `RwLock`, `Condvar` and
 `Once` that are smaller, faster and more flexible than those in the Rust
@@ -47,27 +49,25 @@
    library versions of those types.
 7. `RwLock` takes advantage of hardware lock elision on processors that
    support it, which can lead to huge performance wins with many readers.
-8. `MutexGuard` (and the `RwLock` equivalents) is `Send`, which means it can
-   be unlocked by a different thread than the one that locked it.
-9. `RwLock` uses a task-fair locking policy, which avoids reader and writer
+8. `RwLock` uses a task-fair locking policy, which avoids reader and writer
    starvation, whereas the standard library version makes no guarantees.
-10. `Condvar` is guaranteed not to produce spurious wakeups. A thread will
+9. `Condvar` is guaranteed not to produce spurious wakeups. A thread will
     only be woken up if it timed out or it was woken up by a notification.
-11. `Condvar::notify_all` will only wake up a single thread and requeue the
+10. `Condvar::notify_all` will only wake up a single thread and requeue the
     rest to wait on the associated `Mutex`. This avoids a thundering herd
     problem where all threads try to acquire the lock at the same time.
-12. `RwLock` supports atomically downgrading a write lock into a read lock.
-13. `Mutex` and `RwLock` allow raw unlocking without a RAII guard object.
-14. `Mutex<()>` and `RwLock<()>` allow raw locking without a RAII guard
+11. `RwLock` supports atomically downgrading a write lock into a read lock.
+12. `Mutex` and `RwLock` allow raw unlocking without a RAII guard object.
+13. `Mutex<()>` and `RwLock<()>` allow raw locking without a RAII guard
     object.
-15. `Mutex` and `RwLock` support [eventual fairness](https://trac.webkit.org/changeset/203350)
+14. `Mutex` and `RwLock` support [eventual fairness](https://trac.webkit.org/changeset/203350)
     which allows them to be fair on average without sacrificing performance.
-16. A `ReentrantMutex` type which supports recursive locking.
-17. An *experimental* deadlock detector that works for `Mutex`,
+15. A `ReentrantMutex` type which supports recursive locking.
+16. An *experimental* deadlock detector that works for `Mutex`,
     `RwLock` and `ReentrantMutex`. This feature is disabled by default and
-    can be enabled via the `deadlock_detection` feature. Note that enabling
-    it will also remove the `Send` marker from the lock guards as they are
-    incompatible.
+    can be enabled via the `deadlock_detection` feature.
+17. `RwLock` supports atomically upgrading an "upgradable" read lock into a
+    write lock.
 
 ## The parking lot
 
@@ -89,8 +89,9 @@
   `Condvar` and `RwLock` types instead of `const fn`.
 - `RwLock` will not be able to take advantage of hardware lock elision for
   readers, which improves performance when there are multiple readers.
-- Slightly less efficient code may be generated for `compare_exchange`
-  operations. This should not affect architectures like x86 though.
+
+To enable nightly-only functionality, you need to enable the `nightly` feature
+in Cargo (see below).
 
 ## Usage
 
@@ -98,7 +99,7 @@
 
 ```toml
 [dependencies]
-parking_lot = "0.4"
+parking_lot = "0.6"
 ```
 
 and this to your crate root:
@@ -111,9 +112,12 @@
 
 ```toml
 [dependencies]
-parking_lot = {version = "0.4", features = ["nightly"]}
+parking_lot = {version = "0.6", features = ["nightly"]}
 ```
 
+The experimental deadlock detector can be enabled with the
+`deadlock_detection` Cargo feature.
+
 The core parking lot API is provided by the `parking_lot_core` crate. It is
 separate from the synchronization primitives in the `parking_lot` crate so that
 changes to the core API do not cause breaking changes for users of `parking_lot`.
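Item 17 in the rewritten feature list (upgradable read locks) corresponds to the `upgradable_read` API exported later in this diff. A minimal sketch of the read-check-then-write pattern it enables:

```rust
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let lock = RwLock::new(Vec::<u32>::new());

    // An upgradable read coexists with plain readers, but only one
    // upgradable guard may be held at a time.
    let guard = lock.upgradable_read();
    if guard.is_empty() {
        // Upgrade atomically: no other writer can sneak in between.
        let mut writer = RwLockUpgradableReadGuard::upgrade(guard);
        writer.push(1);
    }

    assert_eq!(lock.read().len(), 1);
}
```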
diff --git a/rustc_deps/vendor/parking_lot/appveyor.yml b/rustc_deps/vendor/parking_lot/appveyor.yml
index 7ee53f9..fef6b57 100644
--- a/rustc_deps/vendor/parking_lot/appveyor.yml
+++ b/rustc_deps/vendor/parking_lot/appveyor.yml
@@ -6,10 +6,10 @@
   - TARGET: nightly-i686-pc-windows-msvc
   - TARGET: nightly-x86_64-pc-windows-gnu
   - TARGET: nightly-i686-pc-windows-gnu
-  - TARGET: 1.18.0-x86_64-pc-windows-msvc
-  - TARGET: 1.18.0-i686-pc-windows-msvc
-  - TARGET: 1.18.0-x86_64-pc-windows-gnu
-  - TARGET: 1.18.0-i686-pc-windows-gnu
+  - TARGET: 1.24.0-x86_64-pc-windows-msvc
+  - TARGET: 1.24.0-i686-pc-windows-msvc
+  - TARGET: 1.24.0-x86_64-pc-windows-gnu
+  - TARGET: 1.24.0-i686-pc-windows-gnu
 
 install:
   - SET PATH=C:\Python27;C:\Python27\Scripts;%PATH%;%APPDATA%\Python\Scripts
diff --git a/rustc_deps/vendor/parking_lot/src/condvar.rs b/rustc_deps/vendor/parking_lot/src/condvar.rs
index 17671ac..89ddac7 100644
--- a/rustc_deps/vendor/parking_lot/src/condvar.rs
+++ b/rustc_deps/vendor/parking_lot/src/condvar.rs
@@ -5,13 +5,14 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use deadlock;
+use lock_api::RawMutex as RawMutexTrait;
+use mutex::MutexGuard;
+use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
+use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
 use std::sync::atomic::{AtomicPtr, Ordering};
 use std::time::{Duration, Instant};
-use std::ptr;
-use parking_lot_core::{self, ParkResult, UnparkResult, RequeueOp, DEFAULT_PARK_TOKEN};
-use mutex::{MutexGuard, guard_lock};
-use raw_mutex::{RawMutex, TOKEN_NORMAL, TOKEN_HANDOFF};
-use deadlock;
+use std::{fmt, ptr};
 
 /// A type indicating whether a timed wait on a condition variable returned
 /// due to a time out or not.
@@ -89,7 +90,9 @@
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new() -> Condvar {
-        Condvar { state: AtomicPtr::new(ptr::null_mut()) }
+        Condvar {
+            state: AtomicPtr::new(ptr::null_mut()),
+        }
     }
 
     /// Creates a new condition variable which is ready to be waited on and
@@ -97,7 +100,9 @@
     #[cfg(not(feature = "nightly"))]
     #[inline]
     pub fn new() -> Condvar {
-        Condvar { state: AtomicPtr::new(ptr::null_mut()) }
+        Condvar {
+            state: AtomicPtr::new(ptr::null_mut()),
+        }
     }
 
     /// Wakes up one blocked thread on this condvar.
@@ -212,7 +217,7 @@
     /// with a different `Mutex` object.
     #[inline]
     pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<T>) {
-        self.wait_until_internal(guard_lock(mutex_guard), None);
+        self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None);
     }
 
     /// Waits on this condition variable for a notification, timing out after
@@ -244,12 +249,19 @@
         mutex_guard: &mut MutexGuard<T>,
         timeout: Instant,
     ) -> WaitTimeoutResult {
-        self.wait_until_internal(guard_lock(mutex_guard), Some(timeout))
+        self.wait_until_internal(
+            unsafe { MutexGuard::mutex(mutex_guard).raw() },
+            Some(timeout),
+        )
     }
 
     // This is a non-generic function to reduce the monomorphization cost of
     // using `wait_until`.
-    fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option<Instant>) -> WaitTimeoutResult {
+    fn wait_until_internal(
+        &self,
+        mutex: &RawMutex,
+        timeout: Option<Instant>,
+    ) -> WaitTimeoutResult {
         unsafe {
             let result;
             let mut bad_mutex = false;
@@ -272,7 +284,7 @@
                 };
                 let before_sleep = || {
                     // Unlock the mutex before sleeping...
-                    mutex.unlock(false);
+                    mutex.unlock();
                 };
                 let timed_out = |k, was_last_thread| {
                     // If we were requeued to a mutex, then we did not time out.
@@ -350,6 +362,12 @@
     }
 }
 
+impl fmt::Debug for Condvar {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.pad("Condvar { .. }")
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::sync::mpsc::channel;
@@ -506,4 +524,10 @@
 
         let _ = c.wait_for(&mut m3.lock(), Duration::from_millis(1));
     }
+
+    #[test]
+    fn test_debug_condvar() {
+        let c = Condvar::new();
+        assert_eq!(format!("{:?}", c), "Condvar { .. }");
+    }
 }
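The `Condvar` changes above are internal (the guard's raw mutex is now reached via `MutexGuard::mutex(...).raw()` instead of the removed `guard_lock` helper); caller-facing usage is unchanged. A small sketch of the wait loop, which takes `&mut MutexGuard` and has no poisoning to unwrap:

```rust
use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = pair.clone();

    thread::spawn(move || {
        let &(ref lock, ref cvar) = &*pair2;
        *lock.lock() = true;
        cvar.notify_one();
    });

    let &(ref lock, ref cvar) = &*pair;
    let mut started = lock.lock();
    while !*started {
        // Atomically unlocks the mutex while waiting, re-locks on wakeup.
        cvar.wait(&mut started);
    }
}
```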
diff --git a/rustc_deps/vendor/parking_lot/src/deadlock.rs b/rustc_deps/vendor/parking_lot/src/deadlock.rs
index d043a82..ef7d619 100644
--- a/rustc_deps/vendor/parking_lot/src/deadlock.rs
+++ b/rustc_deps/vendor/parking_lot/src/deadlock.rs
@@ -1,10 +1,7 @@
-//! [Experimental] Deadlock detection
+//! \[Experimental\] Deadlock detection
 //!
 //! This feature is optional and can be enabled via the `deadlock_detection` feature flag.
 //!
-//! Enabling this feature will *remove* the `Send` marker from `Mutex` and `RwLock` Guards
-//! as locking/unlocking in different threads is incompatible with the deadlock detector.
-//!
 //! # Example
 //!
 //! ```
@@ -36,29 +33,17 @@
 //! } // only for #[cfg]
 //! ```
 
-
 #[cfg(feature = "deadlock_detection")]
 pub use parking_lot_core::deadlock::check_deadlock;
 pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource};
 
-#[cfg(not(feature = "deadlock_detection"))]
-pub(crate) struct DeadlockDetectionMarker;
-
-// when deadlock detector is enabled we want the marker to be !Send + Sync
-#[cfg(feature = "deadlock_detection")]
-use std::marker::PhantomData;
-#[cfg(feature = "deadlock_detection")]
-pub(crate) struct DeadlockDetectionMarker(PhantomData<*mut ()>); // !Send
-#[cfg(feature = "deadlock_detection")]
-unsafe impl Sync for DeadlockDetectionMarker {} // Sync
-
 #[cfg(test)]
 #[cfg(feature = "deadlock_detection")]
 mod tests {
-    use std::thread::{self, sleep};
     use std::sync::{Arc, Barrier};
+    use std::thread::{self, sleep};
     use std::time::Duration;
-    use {Mutex, RwLock, ReentrantMutex};
+    use {Mutex, ReentrantMutex, RwLock};
 
     fn check_deadlock() -> bool {
         use parking_lot_core::deadlock::check_deadlock;
diff --git a/rustc_deps/vendor/parking_lot/src/elision.rs b/rustc_deps/vendor/parking_lot/src/elision.rs
index bd43f95..23895b1 100644
--- a/rustc_deps/vendor/parking_lot/src/elision.rs
+++ b/rustc_deps/vendor/parking_lot/src/elision.rs
@@ -5,10 +5,7 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-#[cfg(feature = "nightly")]
 use std::sync::atomic::AtomicUsize;
-#[cfg(not(feature = "nightly"))]
-use stable::AtomicUsize;
 
 // Extension trait to add lock elision primitives to atomic types
 pub trait AtomicElisionExt {
@@ -67,7 +64,11 @@
                  : "r" (new), "{eax}" (current)
                  : "memory"
                  : "volatile");
-            if prev == current { Ok(prev) } else { Err(prev) }
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
         }
     }
 
@@ -80,12 +81,55 @@
                  : "r" (new), "{eax}" (current)
                  : "memory"
                  : "volatile");
-            if prev == current { Ok(prev) } else { Err(prev) }
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
         }
     }
 }
 
-#[cfg(all(feature = "nightly", target_arch = "x86_64"))]
+#[cfg(all(feature = "nightly", target_arch = "x86_64", target_pointer_width = "32"))]
+impl AtomicElisionExt for AtomicUsize {
+    type IntType = usize;
+
+    #[inline]
+    fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
+        unsafe {
+            let prev: usize;
+            asm!("xacquire; lock; cmpxchgl $2, $1"
+                 : "={rax}" (prev), "+*m" (self)
+                 : "r" (new), "{rax}" (current)
+                 : "memory"
+                 : "volatile");
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
+        }
+    }
+
+    #[inline]
+    fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
+        unsafe {
+            let prev: usize;
+            asm!("xrelease; lock; cmpxchgl $2, $1"
+                 : "={rax}" (prev), "+*m" (self)
+                 : "r" (new), "{rax}" (current)
+                 : "memory"
+                 : "volatile");
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
+        }
+    }
+}
+
+#[cfg(all(feature = "nightly", target_arch = "x86_64", target_pointer_width = "64"))]
 impl AtomicElisionExt for AtomicUsize {
     type IntType = usize;
 
@@ -98,7 +142,11 @@
                  : "r" (new), "{rax}" (current)
                  : "memory"
                  : "volatile");
-            if prev == current { Ok(prev) } else { Err(prev) }
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
         }
     }
 
@@ -111,7 +159,11 @@
                  : "r" (new), "{rax}" (current)
                  : "memory"
                  : "volatile");
-            if prev == current { Ok(prev) } else { Err(prev) }
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
         }
     }
 }
diff --git a/rustc_deps/vendor/parking_lot/src/lib.rs b/rustc_deps/vendor/parking_lot/src/lib.rs
index e361a8d..f16b821 100644
--- a/rustc_deps/vendor/parking_lot/src/lib.rs
+++ b/rustc_deps/vendor/parking_lot/src/lib.rs
@@ -11,60 +11,34 @@
 
 #![warn(missing_docs)]
 #![cfg_attr(feature = "nightly", feature(const_fn))]
-#![cfg_attr(feature = "nightly", feature(const_atomic_u8_new))]
-#![cfg_attr(feature = "nightly", feature(const_atomic_usize_new))]
-#![cfg_attr(feature = "nightly", feature(const_cell_new))]
-#![cfg_attr(feature = "nightly", feature(const_ptr_null_mut))]
-#![cfg_attr(feature = "nightly", feature(const_atomic_ptr_new))]
-#![cfg_attr(feature = "nightly", feature(const_unsafe_cell_new))]
 #![cfg_attr(feature = "nightly", feature(integer_atomics))]
 #![cfg_attr(feature = "nightly", feature(asm))]
 
-#[cfg(feature = "owning_ref")]
-extern crate owning_ref;
-
+extern crate lock_api;
 extern crate parking_lot_core;
 
-#[cfg(not(feature = "nightly"))]
-mod stable;
-
-mod util;
-mod elision;
-mod raw_mutex;
-mod raw_remutex;
-mod raw_rwlock;
 mod condvar;
+mod elision;
 mod mutex;
+mod once;
+mod raw_mutex;
+mod raw_rwlock;
 mod remutex;
 mod rwlock;
-mod once;
+mod util;
 
 #[cfg(feature = "deadlock_detection")]
 pub mod deadlock;
 #[cfg(not(feature = "deadlock_detection"))]
 mod deadlock;
 
-pub use once::{Once, ONCE_INIT, OnceState};
-pub use mutex::{Mutex, MutexGuard};
-pub use remutex::{ReentrantMutex, ReentrantMutexGuard};
 pub use condvar::{Condvar, WaitTimeoutResult};
-pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
-
-#[cfg(feature = "owning_ref")]
-use owning_ref::OwningRef;
-
-/// Typedef of an owning reference that uses a `MutexGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type MutexGuardRef<'a, T, U = T> = OwningRef<MutexGuard<'a, T>, U>;
-
-/// Typedef of an owning reference that uses a `ReentrantMutexGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type ReentrantMutexGuardRef<'a, T, U = T> = OwningRef<ReentrantMutexGuard<'a, T>, U>;
-
-/// Typedef of an owning reference that uses a `RwLockReadGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type RwLockReadGuardRef<'a, T, U = T> = OwningRef<RwLockReadGuard<'a, T>, U>;
-
-/// Typedef of an owning reference that uses a `RwLockWriteGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type RwLockWriteGuardRef<'a, T, U = T> = OwningRef<RwLockWriteGuard<'a, T>, U>;
+pub use mutex::{MappedMutexGuard, Mutex, MutexGuard};
+pub use once::{Once, OnceState, ONCE_INIT};
+pub use raw_mutex::RawMutex;
+pub use raw_rwlock::RawRwLock;
+pub use remutex::{MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard};
+pub use rwlock::{
+    MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
+    RwLockUpgradableReadGuard, RwLockWriteGuard,
+};
diff --git a/rustc_deps/vendor/parking_lot/src/mutex.rs b/rustc_deps/vendor/parking_lot/src/mutex.rs
index 10fef8e..d530400 100644
--- a/rustc_deps/vendor/parking_lot/src/mutex.rs
+++ b/rustc_deps/vendor/parking_lot/src/mutex.rs
@@ -5,17 +5,8 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::cell::UnsafeCell;
-use std::ops::{Deref, DerefMut};
-use std::time::{Duration, Instant};
-use std::fmt;
-use std::mem;
-use std::marker::PhantomData;
+use lock_api;
 use raw_mutex::RawMutex;
-use deadlock::DeadlockDetectionMarker;
-
-#[cfg(feature = "owning_ref")]
-use owning_ref::StableAddress;
 
 /// A mutual exclusion primitive useful for protecting shared data
 ///
@@ -51,7 +42,6 @@
 /// - No poisoning, the lock is released normally on panic.
 /// - Only requires 1 byte of space, whereas the standard library boxes the
 ///   `Mutex` due to platform limitations.
-/// - A `MutexGuard` can be sent to another thread and unlocked there.
 /// - Can be statically constructed (requires the `const_fn` nightly feature).
 /// - Does not require any drop glue when dropped.
 /// - Inline fast path for the uncontended case.
@@ -95,255 +85,31 @@
 ///
 /// rx.recv().unwrap();
 /// ```
-pub struct Mutex<T: ?Sized> {
-    raw: RawMutex,
-    data: UnsafeCell<T>,
-}
-
-unsafe impl<T: Send> Send for Mutex<T> {}
-unsafe impl<T: Send> Sync for Mutex<T> {}
+pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
 
 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
 /// dropped (falls out of scope), the lock will be unlocked.
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` and `DerefMut` implementations.
-#[must_use]
-pub struct MutexGuard<'a, T: ?Sized + 'a> {
-    mutex: &'a Mutex<T>,
-    marker: PhantomData<(&'a mut T, DeadlockDetectionMarker)>,
-}
+pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;
 
-impl<T> Mutex<T> {
-    /// Creates a new mutex in an unlocked state ready for use.
-    #[cfg(feature = "nightly")]
-    #[inline]
-    pub const fn new(val: T) -> Mutex<T> {
-        Mutex {
-            data: UnsafeCell::new(val),
-            raw: RawMutex::new(),
-        }
-    }
-
-    /// Creates a new mutex in an unlocked state ready for use.
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new(val: T) -> Mutex<T> {
-        Mutex {
-            data: UnsafeCell::new(val),
-            raw: RawMutex::new(),
-        }
-    }
-
-    /// Consumes this mutex, returning the underlying data.
-    #[inline]
-    pub fn into_inner(self) -> T {
-        unsafe { self.data.into_inner() }
-    }
-}
-
-impl<T: ?Sized> Mutex<T> {
-    /// Acquires a mutex, blocking the current thread until it is able to do so.
-    ///
-    /// This function will block the local thread until it is available to acquire
-    /// the mutex. Upon returning, the thread is the only thread with the mutex
-    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
-    /// the guard goes out of scope, the mutex will be unlocked.
-    ///
-    /// Attempts to lock a mutex in the thread which already holds the lock will
-    /// result in a deadlock.
-    #[inline]
-    pub fn lock(&self) -> MutexGuard<T> {
-        self.raw.lock();
-        MutexGuard {
-            mutex: self,
-            marker: PhantomData,
-        }
-    }
-
-    /// Attempts to acquire this lock.
-    ///
-    /// If the lock could not be acquired at this time, then `None` is returned.
-    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
-    /// guard is dropped.
-    ///
-    /// This function does not block.
-    #[inline]
-    pub fn try_lock(&self) -> Option<MutexGuard<T>> {
-        if self.raw.try_lock() {
-            Some(MutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this lock until a timeout is reached.
-    ///
-    /// If the lock could not be acquired before the timeout expired, then
-    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
-    /// be unlocked when the guard is dropped.
-    #[inline]
-    pub fn try_lock_for(&self, timeout: Duration) -> Option<MutexGuard<T>> {
-        if self.raw.try_lock_for(timeout) {
-            Some(MutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this lock until a timeout is reached.
-    ///
-    /// If the lock could not be acquired before the timeout expired, then
-    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
-    /// be unlocked when the guard is dropped.
-    #[inline]
-    pub fn try_lock_until(&self, timeout: Instant) -> Option<MutexGuard<T>> {
-        if self.raw.try_lock_until(timeout) {
-            Some(MutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Returns a mutable reference to the underlying data.
-    ///
-    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
-    /// take place---the mutable borrow statically guarantees no locks exist.
-    #[inline]
-    pub fn get_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.data.get() }
-    }
-
-    /// Releases the mutex.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the mutex was locked using
-    /// `raw_lock` or `raw_try_lock`, or if a `MutexGuard` from this mutex was
-    /// leaked (e.g. with `mem::forget`). The mutex must be locked.
-    #[inline]
-    pub unsafe fn raw_unlock(&self) {
-        self.raw.unlock(false);
-    }
-
-    /// Releases the mutex using a fair unlock protocol.
-    ///
-    /// See `MutexGuard::unlock_fair`.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the mutex was locked using
-    /// `raw_lock` or `raw_try_lock`, or if a `MutexGuard` from this mutex was
-    /// leaked (e.g. with `mem::forget`). The mutex must be locked.
-    #[inline]
-    pub unsafe fn raw_unlock_fair(&self) {
-        self.raw.unlock(true);
-    }
-}
-impl Mutex<()> {
-    /// Acquires a mutex, blocking the current thread until it is able to do so.
-    ///
-    /// This is similar to `lock`, except that a `MutexGuard` is not returned.
-    /// Instead you will need to call `raw_unlock` to release the mutex.
-    #[inline]
-    pub fn raw_lock(&self) {
-        self.raw.lock();
-    }
-
-    /// Attempts to acquire this lock.
-    ///
-    /// This is similar to `try_lock`, except that a `MutexGuard` is not
-    /// returned. Instead you will need to call `raw_unlock` to release the
-    /// mutex.
-    #[inline]
-    pub fn raw_try_lock(&self) -> bool {
-        self.raw.try_lock()
-    }
-}
-
-impl<T: ?Sized + Default> Default for Mutex<T> {
-    #[inline]
-    fn default() -> Mutex<T> {
-        Mutex::new(Default::default())
-    }
-}
-
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self.try_lock() {
-            Some(guard) => write!(f, "Mutex {{ data: {:?} }}", &*guard),
-            None => write!(f, "Mutex {{ <locked> }}"),
-        }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> MutexGuard<'a, T> {
-    /// Unlocks the mutex using a fair unlock protocol.
-    ///
-    /// By default, mutexes are unfair and allow the current thread to re-lock
-    /// the mutex before another has the chance to acquire the lock, even if
-    /// that thread has been blocked on the mutex for a long time. This is the
-    /// default because it allows much higher throughput as it avoids forcing a
-    /// context switch on every mutex unlock. This can result in one thread
-    /// acquiring a mutex many more times than other threads.
-    ///
-    /// However in some cases it can be beneficial to ensure fairness by forcing
-    /// the lock to pass on to a waiting thread if there is one. This is done by
-    /// using this method instead of dropping the `MutexGuard` normally.
-    #[inline]
-    pub fn unlock_fair(self) {
-        self.mutex.raw.unlock(true);
-        mem::forget(self);
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Deref for MutexGuard<'a, T> {
-    type Target = T;
-    #[inline]
-    fn deref(&self) -> &T {
-        unsafe { &*self.mutex.data.get() }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, T> {
-    #[inline]
-    fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.mutex.data.get() }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Drop for MutexGuard<'a, T> {
-    #[inline]
-    fn drop(&mut self) {
-        self.mutex.raw.unlock(false);
-    }
-}
-
-#[cfg(feature = "owning_ref")]
-unsafe impl<'a, T: ?Sized> StableAddress for MutexGuard<'a, T> {}
-
-// Helper function used by Condvar, not publicly exported
-#[inline]
-pub(crate) fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a RawMutex {
-    &guard.mutex.raw
-}
+/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
 
 #[cfg(test)]
 mod tests {
+    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::Arc;
-    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::thread;
-    use {Mutex, Condvar};
+    use {Condvar, Mutex};
 
     struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
 
@@ -504,15 +270,6 @@
         assert_eq!(&*mutex.lock(), comp);
     }
 
-    #[cfg(not(feature = "deadlock_detection"))]
-    #[test]
-    fn test_mutexguard_send() {
-        fn send<T: Send>(_: T) {}
-
-        let mutex = Mutex::new(());
-        send(mutex.lock());
-    }
-
     #[test]
     fn test_mutexguard_sync() {
         fn sync<T: Sync>(_: T) {}
@@ -520,4 +277,22 @@
         let mutex = Mutex::new(());
         sync(mutex.lock());
     }
+
+    #[test]
+    fn test_mutex_debug() {
+        let mutex = Mutex::new(vec![0u8, 10]);
+
+        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
+        assert_eq!(
+            format!("{:#?}", mutex),
+            "Mutex {
+    data: [
+        0,
+        10
+    ]
+}"
+        );
+        let _lock = mutex.lock();
+        assert_eq!(format!("{:?}", mutex), "Mutex { <locked> }");
+    }
 }
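The new `MappedMutexGuard` alias documented above behaves like `MutexGuard` minus `unlocked`/`bump`/`Condvar` support. A minimal sketch (the `Config` type is illustrative, not part of this change):

```rust
use parking_lot::{MappedMutexGuard, Mutex, MutexGuard};

struct Config {
    name: String,
    retries: u32,
}

fn main() {
    let cfg = Mutex::new(Config { name: "svc".into(), retries: 3 });

    // Narrow the guard to one field; `cfg` stays locked until `name` drops.
    let mut name: MappedMutexGuard<String> =
        MutexGuard::map(cfg.lock(), |c| &mut c.name);
    name.push_str("-0");
    drop(name);

    assert_eq!(cfg.lock().name, "svc-0");
    assert_eq!(cfg.lock().retries, 3);
}
```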
diff --git a/rustc_deps/vendor/parking_lot/src/once.rs b/rustc_deps/vendor/parking_lot/src/once.rs
index e1bb356..f3e70be 100644
--- a/rustc_deps/vendor/parking_lot/src/once.rs
+++ b/rustc_deps/vendor/parking_lot/src/once.rs
@@ -5,16 +5,20 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use std::sync::atomic::{fence, Ordering};
 #[cfg(feature = "nightly")]
-use std::sync::atomic::{AtomicU8, ATOMIC_U8_INIT, Ordering, fence};
+use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
 #[cfg(feature = "nightly")]
 type U8 = u8;
 #[cfg(not(feature = "nightly"))]
-use stable::{AtomicU8, ATOMIC_U8_INIT, Ordering, fence};
+use std::sync::atomic::AtomicUsize as AtomicU8;
+#[cfg(not(feature = "nightly"))]
+use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
 #[cfg(not(feature = "nightly"))]
 type U8 = usize;
-use std::mem;
 use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
+use std::fmt;
+use std::mem;
 use util::UncheckedOptionExt;
 
 const DONE_BIT: U8 = 1;
@@ -95,14 +99,14 @@
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new() -> Once {
-        Once(AtomicU8::new(0))
+        Once(ATOMIC_U8_INIT)
     }
 
     /// Creates a new `Once` value.
     #[cfg(not(feature = "nightly"))]
     #[inline]
     pub fn new() -> Once {
-        Once(AtomicU8::new(0))
+        Once(ATOMIC_U8_INIT)
     }
 
     /// Returns the current state of this `Once`.
@@ -267,8 +271,7 @@
                     state | PARKED_BIT,
                     Ordering::Relaxed,
                     Ordering::Relaxed,
-                )
-                {
+                ) {
                     state = x;
                     continue;
                 }
@@ -340,6 +343,14 @@
     }
 }
 
+impl fmt::Debug for Once {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Once")
+            .field("state", &self.state())
+            .finish()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     #[cfg(feature = "nightly")]
@@ -400,11 +411,15 @@
         static O: Once = ONCE_INIT;
 
         // poison the once
-        let t = panic::catch_unwind(|| { O.call_once(|| panic!()); });
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| panic!());
+        });
         assert!(t.is_err());
 
         // poisoning propagates
-        let t = panic::catch_unwind(|| { O.call_once(|| {}); });
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| {});
+        });
         assert!(t.is_err());
 
         // we can subvert poisoning, however
@@ -425,7 +440,9 @@
         static O: Once = ONCE_INIT;
 
         // poison the once
-        let t = panic::catch_unwind(|| { O.call_once(|| panic!()); });
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| panic!());
+        });
         assert!(t.is_err());
 
         // make sure someone's waiting inside the once via a force
@@ -444,7 +461,9 @@
         // put another waiter on the once
         let t2 = thread::spawn(|| {
             let mut called = false;
-            O.call_once(|| { called = true; });
+            O.call_once(|| {
+                called = true;
+            });
             assert!(!called);
         });
 
@@ -452,6 +471,18 @@
 
         assert!(t1.join().is_ok());
         assert!(t2.join().is_ok());
+    }
 
+    #[test]
+    fn test_once_debug() {
+        static O: Once = ONCE_INIT;
+
+        assert_eq!(format!("{:?}", O), "Once { state: New }");
+        assert_eq!(
+            format!("{:#?}", O),
+            "Once {
+    state: New
+}"
+        );
     }
 }
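`Once` keeps its caller-facing shape through this refactor (only the atomic storage and formatting change). A short usage sketch:

```rust
use parking_lot::{Once, ONCE_INIT};

static INIT: Once = ONCE_INIT;

fn main() {
    let mut calls = 0;
    INIT.call_once(|| calls += 1);
    INIT.call_once(|| calls += 1); // second closure is never run
    assert_eq!(calls, 1);
}
```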
diff --git a/rustc_deps/vendor/parking_lot/src/raw_mutex.rs b/rustc_deps/vendor/parking_lot/src/raw_mutex.rs
index 13b0000..1ba5581 100644
--- a/rustc_deps/vendor/parking_lot/src/raw_mutex.rs
+++ b/rustc_deps/vendor/parking_lot/src/raw_mutex.rs
@@ -5,48 +5,49 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use std::sync::atomic::Ordering;
 #[cfg(feature = "nightly")]
-use std::sync::atomic::{AtomicU8, Ordering};
+use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
 #[cfg(feature = "nightly")]
 type U8 = u8;
 #[cfg(not(feature = "nightly"))]
-use stable::{AtomicU8, Ordering};
+use std::sync::atomic::AtomicUsize as AtomicU8;
+#[cfg(not(feature = "nightly"))]
+use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
 #[cfg(not(feature = "nightly"))]
 type U8 = usize;
-use std::time::{Duration, Instant};
-use parking_lot_core::{self, ParkResult, UnparkResult, SpinWait, UnparkToken, DEFAULT_PARK_TOKEN};
 use deadlock;
+use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed};
+use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
+use std::time::{Duration, Instant};
 
 // UnparkToken used to indicate that the target thread should attempt to
 // lock the mutex again as soon as it is unparked.
-pub const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
+pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
 
 // UnparkToken used to indicate that the mutex is being handed off to the target
 // thread directly without unlocking it.
-pub const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
+pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
 
 const LOCKED_BIT: U8 = 1;
 const PARKED_BIT: U8 = 2;
 
+/// Raw mutex type backed by the parking lot.
 pub struct RawMutex {
     state: AtomicU8,
 }
 
-impl RawMutex {
-    #[cfg(feature = "nightly")]
-    #[inline]
-    pub const fn new() -> RawMutex {
-        RawMutex { state: AtomicU8::new(0) }
-    }
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new() -> RawMutex {
-        RawMutex { state: AtomicU8::new(0) }
-    }
+unsafe impl RawMutexTrait for RawMutex {
+    const INIT: RawMutex = RawMutex {
+        state: ATOMIC_U8_INIT,
+    };
+
+    type GuardMarker = GuardNoSend;
 
     #[inline]
-    pub fn lock(&self) {
-        if self.state
+    fn lock(&self) {
+        if self
+            .state
             .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
             .is_err()
         {
@@ -56,39 +57,7 @@
     }
 
     #[inline]
-    pub fn try_lock_until(&self, timeout: Instant) -> bool {
-        let result = if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok()
-        {
-            true
-        } else {
-            self.lock_slow(Some(timeout))
-        };
-        if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
-        }
-        result
-    }
-
-    #[inline]
-    pub fn try_lock_for(&self, timeout: Duration) -> bool {
-        let result = if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok()
-        {
-            true
-        } else {
-            self.lock_slow(Some(Instant::now() + timeout))
-        };
-        if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
-        }
-        result
-    }
-
-    #[inline]
-    pub fn try_lock(&self) -> bool {
+    fn try_lock(&self) -> bool {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
             if state & LOCKED_BIT != 0 {
@@ -110,17 +79,81 @@
     }
 
     #[inline]
-    pub fn unlock(&self, force_fair: bool) {
+    fn unlock(&self) {
         unsafe { deadlock::release_resource(self as *const _ as usize) };
-        if self.state
+        if self
+            .state
             .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
         }
-        self.unlock_slow(force_fair);
+        self.unlock_slow(false);
+    }
+}
+
+unsafe impl RawMutexFair for RawMutex {
+    #[inline]
+    fn unlock_fair(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        if self
+            .state
+            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+        self.unlock_slow(true);
     }
 
+    #[inline]
+    fn bump(&self) {
+        if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
+            self.bump_slow();
+        }
+    }
+}
+
+unsafe impl RawMutexTimed for RawMutex {
+    type Duration = Duration;
+    type Instant = Instant;
+
+    #[inline]
+    fn try_lock_until(&self, timeout: Instant) -> bool {
+        let result = if self
+            .state
+            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            true
+        } else {
+            self.lock_slow(Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    fn try_lock_for(&self, timeout: Duration) -> bool {
+        let result = if self
+            .state
+            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            true
+        } else {
+            self.lock_slow(Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+}
+
+impl RawMutex {
     // Used by Condvar when requeuing threads to us, must be called while
     // holding the queue lock.
     #[inline]
@@ -182,8 +215,7 @@
                     state | PARKED_BIT,
                     Ordering::Relaxed,
                     Ordering::Relaxed,
-                )
-                {
+                ) {
                     state = x;
                     continue;
                 }
@@ -233,7 +265,8 @@
     #[inline(never)]
     fn unlock_slow(&self, force_fair: bool) {
         // Unlock directly if there are no parked threads
-        if self.state
+        if self
+            .state
             .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
@@ -268,4 +301,12 @@
             parking_lot_core::unpark_one(addr, callback);
         }
     }
+
+    #[cold]
+    #[inline(never)]
+    fn bump_slow(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        self.unlock_slow(true);
+        self.lock();
+    }
 }
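The `unsafe impl RawMutexTrait for RawMutex` above shows the shape of the new lock_api contract: an `INIT` constant, a `GuardMarker`, and `lock`/`try_lock`/`unlock`, with fairness and timeouts layered on as extra traits. As a sketch of what that buys downstream users, a toy spinlock plugged into `lock_api::Mutex` (the `RawSpinlock` type is illustrative, not part of this change):

```rust
use lock_api::{GuardSend, RawMutex};
use std::sync::atomic::{AtomicBool, Ordering};

// A trivial raw lock: spin until the flag is acquired.
pub struct RawSpinlock(AtomicBool);

unsafe impl RawMutex for RawSpinlock {
    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));

    // Guards may move between threads; parking_lot's own RawMutex
    // uses GuardNoSend instead (see the impl above).
    type GuardMarker = GuardSend;

    fn lock(&self) {
        while !self.try_lock() {
            std::sync::atomic::spin_loop_hint();
        }
    }

    fn try_lock(&self) -> bool {
        !self.0.swap(true, Ordering::Acquire)
    }

    fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

// The lock_api wrapper turns the raw lock into a full Mutex<T>.
pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;

fn main() {
    let lock = Spinlock::new(0u32);
    *lock.lock() += 1;
    assert_eq!(*lock.lock(), 1);
}
```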
diff --git a/rustc_deps/vendor/parking_lot/src/raw_remutex.rs b/rustc_deps/vendor/parking_lot/src/raw_remutex.rs
deleted file mode 100644
index c8bc5bd..0000000
--- a/rustc_deps/vendor/parking_lot/src/raw_remutex.rs
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 Amanieu d'Antras
-//
-// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
-// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
-// http://opensource.org/licenses/MIT>, at your option. This file may not be
-// copied, modified, or distributed except according to those terms.
-
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::{Duration, Instant};
-use std::cell::Cell;
-use raw_mutex::RawMutex;
-
-// Helper function to get a thread id
-fn get_thread_id() -> usize {
-    // The address of a thread-local variable is guaranteed to be unique to the
-    // current thread, and is also guaranteed to be non-zero.
-    thread_local!(static KEY: u8 = unsafe { ::std::mem::uninitialized() });
-    KEY.with(|x| x as *const _ as usize)
-}
-
-pub struct RawReentrantMutex {
-    owner: AtomicUsize,
-    lock_count: Cell<usize>,
-    mutex: RawMutex,
-}
-
-unsafe impl Sync for RawReentrantMutex {}
-
-impl RawReentrantMutex {
-    #[cfg(feature = "nightly")]
-    #[inline]
-    pub const fn new() -> RawReentrantMutex {
-        RawReentrantMutex {
-            owner: AtomicUsize::new(0),
-            lock_count: Cell::new(0),
-            mutex: RawMutex::new(),
-        }
-    }
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new() -> RawReentrantMutex {
-        RawReentrantMutex {
-            owner: AtomicUsize::new(0),
-            lock_count: Cell::new(0),
-            mutex: RawMutex::new(),
-        }
-    }
-
-    #[inline]
-    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
-        let id = get_thread_id();
-        if self.owner.load(Ordering::Relaxed) == id {
-            self.lock_count.set(
-                self.lock_count.get().checked_add(1).expect(
-                    "ReentrantMutex lock count overflow",
-                ),
-            );
-        } else {
-            if !try_lock() {
-                return false;
-            }
-            self.owner.store(id, Ordering::Relaxed);
-            self.lock_count.set(1);
-        }
-        true
-    }
-
-    #[inline]
-    pub fn lock(&self) {
-        self.lock_internal(|| {
-            self.mutex.lock();
-            true
-        });
-    }
-
-    #[inline]
-    pub fn try_lock_until(&self, timeout: Instant) -> bool {
-        self.lock_internal(|| self.mutex.try_lock_until(timeout))
-    }
-
-    #[inline]
-    pub fn try_lock_for(&self, timeout: Duration) -> bool {
-        self.lock_internal(|| self.mutex.try_lock_for(timeout))
-    }
-
-    #[inline]
-    pub fn try_lock(&self) -> bool {
-        self.lock_internal(|| self.mutex.try_lock())
-    }
-
-    #[inline]
-    pub fn unlock(&self, force_fair: bool) {
-        let lock_count = self.lock_count.get() - 1;
-        if lock_count == 0 {
-            self.owner.store(0, Ordering::Relaxed);
-            self.mutex.unlock(force_fair);
-        } else {
-            self.lock_count.set(lock_count);
-        }
-    }
-}
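The deleted file is superseded by `lock_api::ReentrantMutex`, which implements the same owner/count protocol generically over any raw mutex plus a `GetThreadId` source (see the `remutex.rs` diff below). A condensed sketch of that protocol, with hypothetical names, for reference:

    use std::cell::Cell;
    use std::sync::atomic::{AtomicUsize, Ordering};

    struct Reentrant<M> {
        owner: AtomicUsize, // id of the owning thread, 0 when unlocked
        count: Cell<usize>, // recursion depth, only touched by the owner
        mutex: M,           // the underlying non-reentrant lock
    }

    impl<M> Reentrant<M> {
        fn lock_with(&self, thread_id: usize, acquire: impl FnOnce() -> bool) -> bool {
            if self.owner.load(Ordering::Relaxed) == thread_id {
                // Already ours: just bump the recursion count.
                self.count.set(self.count.get().checked_add(1).unwrap());
            } else {
                if !acquire() {
                    return false;
                }
                self.owner.store(thread_id, Ordering::Relaxed);
                self.count.set(1);
            }
            true
        }
    }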
diff --git a/rustc_deps/vendor/parking_lot/src/raw_rwlock.rs b/rustc_deps/vendor/parking_lot/src/raw_rwlock.rs
index a064840..f59a6ac 100644
--- a/rustc_deps/vendor/parking_lot/src/raw_rwlock.rs
+++ b/rustc_deps/vendor/parking_lot/src/raw_rwlock.rs
@@ -5,64 +5,105 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-#[cfg(feature = "nightly")]
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::cell::Cell;
-#[cfg(not(feature = "nightly"))]
-use stable::{AtomicUsize, Ordering};
-use std::time::{Duration, Instant};
-use parking_lot_core::{self, ParkResult, UnparkResult, SpinWait, ParkToken, FilterOp};
-use elision::{have_elision, AtomicElisionExt};
-use util::UncheckedOptionExt;
-use raw_mutex::{TOKEN_NORMAL, TOKEN_HANDOFF};
 use deadlock;
+use elision::{have_elision, AtomicElisionExt};
+use lock_api::{
+    GuardNoSend, RawRwLock as RawRwLockTrait, RawRwLockDowngrade, RawRwLockFair,
+    RawRwLockRecursive, RawRwLockRecursiveTimed, RawRwLockTimed, RawRwLockUpgrade,
+    RawRwLockUpgradeDowngrade, RawRwLockUpgradeFair, RawRwLockUpgradeTimed,
+};
+use parking_lot_core::{self, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult};
+use raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL};
+use std::cell::Cell;
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::time::{Duration, Instant};
+
+const PARKED_BIT: usize = 0b001;
+const UPGRADING_BIT: usize = 0b010;
+// A shared lock acquires a single unit of the guard resource
+const SHARED_GUARD: usize = 0b100;
+const GUARD_COUNT_MASK: usize = !(SHARED_GUARD - 1);
+// An exclusive lock acquires all of the guard resource (i.e. it is exclusive)
+const EXCLUSIVE_GUARD: usize = GUARD_COUNT_MASK;
+// An upgradable lock acquires just over half of the guard resource.
+// This should be (GUARD_COUNT_MASK + SHARED_GUARD) >> 1; however, that sum
+// might overflow, so we shift before adding (which is okay since the least
+// significant bit is zero for both GUARD_COUNT_MASK and SHARED_GUARD)
+const UPGRADABLE_GUARD: usize = (GUARD_COUNT_MASK >> 1) + (SHARED_GUARD >> 1);
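+// Worked example on a 64-bit usize:
+//   SHARED_GUARD     = 0x0000_0000_0000_0004 (one reader)
+//   GUARD_COUNT_MASK = 0xFFFF_FFFF_FFFF_FFFC
+//   EXCLUSIVE_GUARD  = 0xFFFF_FFFF_FFFF_FFFC (the entire capacity)
+//   UPGRADABLE_GUARD = 0x8000_0000_0000_0000 (just over half of it)
+// A writer consumes the whole capacity and so excludes everyone; two
+// upgradable locks cannot coexist because adding their guards overflows;
+// readers stack in units of SHARED_GUARD alongside at most one upgradable.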
 
 // Token indicating what type of lock queued threads are trying to acquire
-const TOKEN_SHARED: ParkToken = ParkToken(0);
-const TOKEN_EXCLUSIVE: ParkToken = ParkToken(1);
+const TOKEN_SHARED: ParkToken = ParkToken(SHARED_GUARD);
+const TOKEN_EXCLUSIVE: ParkToken = ParkToken(EXCLUSIVE_GUARD);
+const TOKEN_UPGRADABLE: ParkToken = ParkToken(UPGRADABLE_GUARD);
+const TOKEN_UPGRADING: ParkToken = ParkToken((EXCLUSIVE_GUARD - UPGRADABLE_GUARD) | UPGRADING_BIT);
 
-const PARKED_BIT: usize = 1;
-const LOCKED_BIT: usize = 2;
-const SHARED_COUNT_MASK: usize = !3;
-const SHARED_COUNT_INC: usize = 4;
-const SHARED_COUNT_SHIFT: usize = 2;
-
+/// Raw reader-writer lock type backed by the parking lot.
 pub struct RawRwLock {
     state: AtomicUsize,
 }
 
-impl RawRwLock {
-    #[cfg(feature = "nightly")]
-    #[inline]
-    pub const fn new() -> RawRwLock {
-        RawRwLock { state: AtomicUsize::new(0) }
-    }
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new() -> RawRwLock {
-        RawRwLock { state: AtomicUsize::new(0) }
-    }
+unsafe impl RawRwLockTrait for RawRwLock {
+    const INIT: RawRwLock = RawRwLock {
+        state: ATOMIC_USIZE_INIT,
+    };
+
+    type GuardMarker = GuardNoSend;
 
     #[inline]
-    pub fn lock_exclusive(&self) {
-        if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+    fn lock_exclusive(&self) {
+        if self
+            .state
+            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
             .is_err()
         {
-            self.lock_exclusive_slow(None);
+            let result = self.lock_exclusive_slow(None);
+            debug_assert!(result);
         }
         unsafe { deadlock::acquire_resource(self as *const _ as usize) };
     }
 
     #[inline]
-    pub fn try_lock_exclusive_until(&self, timeout: Instant) -> bool {
-        let result = if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+    fn try_lock_exclusive(&self) -> bool {
+        if self
+            .state
+            .compare_exchange(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
         {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
             true
         } else {
-            self.lock_exclusive_slow(Some(timeout))
+            false
+        }
+    }
+
+    #[inline]
+    fn unlock_exclusive(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        if self
+            .state
+            .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+        self.unlock_exclusive_slow(false);
+    }
+
+    #[inline]
+    fn lock_shared(&self) {
+        if !self.try_lock_shared_fast(false) {
+            let result = self.lock_shared_slow(false, None);
+            debug_assert!(result);
+        }
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+    }
+
+    #[inline]
+    fn try_lock_shared(&self) -> bool {
+        let result = if self.try_lock_shared_fast(false) {
+            true
+        } else {
+            self.try_lock_shared_slow(false)
         };
         if result {
             unsafe { deadlock::acquire_resource(self as *const _ as usize) };
@@ -71,9 +112,150 @@
     }
 
     #[inline]
-    pub fn try_lock_exclusive_for(&self, timeout: Duration) -> bool {
-        let result = if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+    fn unlock_shared(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        let state = self.state.load(Ordering::Relaxed);
+        if state & PARKED_BIT == 0
+            || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD)
+        {
+            if have_elision() {
+                if self
+                    .state
+                    .elision_release(state, state - SHARED_GUARD)
+                    .is_ok()
+                {
+                    return;
+                }
+            } else {
+                if self
+                    .state
+                    .compare_exchange_weak(
+                        state,
+                        state - SHARED_GUARD,
+                        Ordering::Release,
+                        Ordering::Relaxed,
+                    )
+                    .is_ok()
+                {
+                    return;
+                }
+            }
+        }
+        self.unlock_shared_slow(false);
+    }
+}
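Every lock/unlock above is mirrored into the `deadlock` module, which compiles to no-ops unless the crate's `deadlock_detection` feature is enabled. A hedged sketch of the consumer side, using the polling API parking_lot documents for this feature:

    use std::thread;
    use std::time::Duration;

    // Background thread that periodically checks for cycles among the
    // resources registered through deadlock::acquire_resource above.
    fn spawn_deadlock_watchdog() {
        thread::spawn(|| loop {
            thread::sleep(Duration::from_secs(10));
            let deadlocks = parking_lot::deadlock::check_deadlock();
            for (i, threads) in deadlocks.iter().enumerate() {
                eprintln!("deadlock #{}: {} threads involved", i, threads.len());
            }
        });
    }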
+
+unsafe impl RawRwLockFair for RawRwLock {
+    #[inline]
+    fn unlock_shared_fair(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        let state = self.state.load(Ordering::Relaxed);
+        if state & PARKED_BIT == 0
+            || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD)
+        {
+            if have_elision() {
+                if self
+                    .state
+                    .elision_release(state, state - SHARED_GUARD)
+                    .is_ok()
+                {
+                    return;
+                }
+            } else {
+                if self
+                    .state
+                    .compare_exchange_weak(
+                        state,
+                        state - SHARED_GUARD,
+                        Ordering::Release,
+                        Ordering::Relaxed,
+                    )
+                    .is_ok()
+                {
+                    return;
+                }
+            }
+        }
+        self.unlock_shared_slow(true);
+    }
+
+    #[inline]
+    fn unlock_exclusive_fair(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        if self
+            .state
+            .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+        self.unlock_exclusive_slow(true);
+    }
+
+    #[inline]
+    fn bump_shared(&self) {
+        if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
+            self.bump_shared_slow();
+        }
+    }
+
+    #[inline]
+    fn bump_exclusive(&self) {
+        if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
+            self.bump_exclusive_slow();
+        }
+    }
+}
+
+unsafe impl RawRwLockDowngrade for RawRwLock {
+    #[inline]
+    fn downgrade(&self) {
+        let state = self
+            .state
+            .fetch_sub(EXCLUSIVE_GUARD - SHARED_GUARD, Ordering::Release);
+
+        // Wake up parked shared and upgradable threads if there are any
+        if state & PARKED_BIT != 0 {
+            self.downgrade_slow();
+        }
+    }
+}
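`downgrade` releases `EXCLUSIVE_GUARD - SHARED_GUARD` in a single atomic subtraction, so the writer becomes a reader with no window in which another writer could slip in. Through the lock_api guard this looks like:

    use parking_lot::{RwLock, RwLockWriteGuard};

    fn init_then_read(lock: &RwLock<Vec<u8>>) {
        let mut w = lock.write();
        w.push(1);
        // Atomically trade write access for read access; any parked
        // readers are woken via downgrade_slow().
        let r = RwLockWriteGuard::downgrade(w);
        assert_eq!(r.len(), 1);
    }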
+
+unsafe impl RawRwLockTimed for RawRwLock {
+    type Duration = Duration;
+    type Instant = Instant;
+
+    #[inline]
+    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool {
+        let result = if self.try_lock_shared_fast(false) {
+            true
+        } else {
+            self.lock_shared_slow(false, Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool {
+        let result = if self.try_lock_shared_fast(false) {
+            true
+        } else {
+            self.lock_shared_slow(false, Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    fn try_lock_exclusive_for(&self, timeout: Duration) -> bool {
+        let result = if self
+            .state
+            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
         {
             true
@@ -87,68 +269,275 @@
     }
 
     #[inline]
-    pub fn try_lock_exclusive(&self) -> bool {
-        if self.state
-            .compare_exchange(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+    fn try_lock_exclusive_until(&self, timeout: Instant) -> bool {
+        let result = if self
+            .state
+            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
         {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
             true
         } else {
-            false
+            self.lock_exclusive_slow(Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
+        result
+    }
+}
+
+unsafe impl RawRwLockRecursive for RawRwLock {
+    #[inline]
+    fn lock_shared_recursive(&self) {
+        if !self.try_lock_shared_fast(true) {
+            let result = self.lock_shared_slow(true, None);
+            debug_assert!(result);
+        }
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
     }
 
     #[inline]
-    pub fn unlock_exclusive(&self, force_fair: bool) {
+    fn try_lock_shared_recursive(&self) -> bool {
+        let result = if self.try_lock_shared_fast(true) {
+            true
+        } else {
+            self.try_lock_shared_slow(true)
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+}
+
+unsafe impl RawRwLockRecursiveTimed for RawRwLock {
+    #[inline]
+    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool {
+        let result = if self.try_lock_shared_fast(true) {
+            true
+        } else {
+            self.lock_shared_slow(true, Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool {
+        let result = if self.try_lock_shared_fast(true) {
+            true
+        } else {
+            self.lock_shared_slow(true, Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+}
+
+unsafe impl RawRwLockUpgrade for RawRwLock {
+    #[inline]
+    fn lock_upgradable(&self) {
+        if !self.try_lock_upgradable_fast() {
+            let result = self.lock_upgradable_slow(None);
+            debug_assert!(result);
+        }
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+    }
+
+    #[inline]
+    fn try_lock_upgradable(&self) -> bool {
+        let result = if self.try_lock_upgradable_fast() {
+            true
+        } else {
+            self.try_lock_upgradable_slow()
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    fn unlock_upgradable(&self) {
         unsafe { deadlock::release_resource(self as *const _ as usize) };
-        if self.state
-            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+        if self
+            .state
+            .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
         }
-        self.unlock_exclusive_slow(force_fair);
+        self.unlock_upgradable_slow(false);
     }
 
     #[inline]
-    pub fn downgrade(&self) {
-        let state = self.state.fetch_add(
-            SHARED_COUNT_INC - LOCKED_BIT,
-            Ordering::Release,
-        );
-
-        // Wake up parked shared threads if there are any
-        if state & PARKED_BIT != 0 {
-            self.downgrade_slow();
+    fn upgrade(&self) {
+        if self
+            .state
+            .compare_exchange_weak(
+                UPGRADABLE_GUARD,
+                EXCLUSIVE_GUARD,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_err()
+        {
+            let result = self.upgrade_slow(None);
+            debug_assert!(result);
         }
     }
 
+    fn try_upgrade(&self) -> bool {
+        if self
+            .state
+            .compare_exchange_weak(
+                UPGRADABLE_GUARD,
+                EXCLUSIVE_GUARD,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+        {
+            true
+        } else {
+            self.try_upgrade_slow()
+        }
+    }
+}
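Upgradable reads are new in this release: thanks to the guard arithmetic, one upgradable holder can coexist with plain readers but never with a second upgradable or a writer. A sketch against the public 0.6 API:

    use parking_lot::{RwLock, RwLockUpgradableReadGuard};

    fn get_or_init(lock: &RwLock<Option<String>>) -> String {
        // Coexists with plain readers, excludes other upgradable locks.
        let guard = lock.upgradable_read();
        if let Some(v) = &*guard {
            return v.clone();
        }
        // Promote to a writer; upgrade_slow() parks us until the
        // remaining readers drain.
        let mut w = RwLockUpgradableReadGuard::upgrade(guard);
        *w = Some("default".to_owned());
        "default".to_owned()
    }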
+
+unsafe impl RawRwLockUpgradeFair for RawRwLock {
+    #[inline]
+    fn unlock_upgradable_fair(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        if self
+            .state
+            .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+        self.unlock_upgradable_slow(true);
+    }
+
+    #[inline]
+    fn bump_upgradable(&self) {
+        if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
+            self.bump_upgradable_slow();
+        }
+    }
+}
+
+unsafe impl RawRwLockUpgradeDowngrade for RawRwLock {
+    #[inline]
+    fn downgrade_upgradable(&self) {
+        let state = self
+            .state
+            .fetch_sub(UPGRADABLE_GUARD - SHARED_GUARD, Ordering::Relaxed);
+
+        // Wake up parked shared and upgradable threads if there are any
+        if state & PARKED_BIT != 0 {
+            self.downgrade_upgradable_slow(state);
+        }
+    }
+
+    #[inline]
+    fn downgrade_to_upgradable(&self) {
+        let state = self
+            .state
+            .fetch_sub(EXCLUSIVE_GUARD - UPGRADABLE_GUARD, Ordering::Release);
+
+        // Wake up parked shared threads if there are any
+        if state & PARKED_BIT != 0 {
+            self.downgrade_to_upgradable_slow();
+        }
+    }
+}
+
+unsafe impl RawRwLockUpgradeTimed for RawRwLock {
+    #[inline]
+    fn try_lock_upgradable_until(&self, timeout: Instant) -> bool {
+        let result = if self.try_lock_upgradable_fast() {
+            true
+        } else {
+            self.lock_upgradable_slow(Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    fn try_lock_upgradable_for(&self, timeout: Duration) -> bool {
+        let result = if self.try_lock_upgradable_fast() {
+            true
+        } else {
+            self.lock_upgradable_slow(Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    fn try_upgrade_until(&self, timeout: Instant) -> bool {
+        if self
+            .state
+            .compare_exchange_weak(
+                UPGRADABLE_GUARD,
+                EXCLUSIVE_GUARD,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+        {
+            true
+        } else {
+            self.upgrade_slow(Some(timeout))
+        }
+    }
+
+    #[inline]
+    fn try_upgrade_for(&self, timeout: Duration) -> bool {
+        if self
+            .state
+            .compare_exchange_weak(
+                UPGRADABLE_GUARD,
+                EXCLUSIVE_GUARD,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+        {
+            true
+        } else {
+            self.upgrade_slow(Some(Instant::now() + timeout))
+        }
+    }
+}
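All the timed variants share one shape: a single weak CAS fast path, then the slow path with a deadline (the `_for` flavors convert via `Instant::now() + timeout`). At the wrapper level this becomes, for example:

    use parking_lot::RwLock;
    use std::time::Duration;

    fn flush_if_quiet(lock: &RwLock<Vec<u8>>) {
        // Bounded attempt at exclusive access; None means readers or a
        // writer were still in the way when the deadline passed.
        if let Some(mut buf) = lock.try_write_for(Duration::from_millis(5)) {
            buf.clear();
        }
    }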
+
+impl RawRwLock {
     #[inline(always)]
     fn try_lock_shared_fast(&self, recursive: bool) -> bool {
         let state = self.state.load(Ordering::Relaxed);
 
-        if !recursive {
-            // Even if there are no exclusive locks, we can't allow grabbing a
-            // shared lock while there are parked threads since that could lead to
-            // writer starvation.
-            if state & (LOCKED_BIT | PARKED_BIT) != 0 {
-                return false;
-            }
-        } else {
-            // Allow acquiring a lock even if a thread is parked to avoid
-            // deadlocks for recursive read locks.
-            if state & LOCKED_BIT != 0 {
-                return false;
-            }
+        // We can't allow grabbing a shared lock while there are parked threads
+        // since that could lead to writer starvation.
+        if !recursive && state & PARKED_BIT != 0 {
+            return false;
         }
 
         // Use hardware lock elision to avoid cache conflicts when multiple
         // readers try to acquire the lock. We only do this if the lock is
         // completely empty since elision handles conflicts poorly.
         if have_elision() && state == 0 {
-            self.state.elision_acquire(0, SHARED_COUNT_INC).is_ok()
-        } else if let Some(new_state) = state.checked_add(SHARED_COUNT_INC) {
+            self.state.elision_acquire(0, SHARED_GUARD).is_ok()
+        } else if let Some(new_state) = state.checked_add(SHARED_GUARD) {
             self.state
                 .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
                 .is_ok()
@@ -157,80 +546,23 @@
         }
     }
 
-    #[inline]
-    pub fn lock_shared(&self, recursive: bool) {
-        if !self.try_lock_shared_fast(recursive) {
-            self.lock_shared_slow(recursive, None);
-        }
-        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
-    }
-
-    #[inline]
-    pub fn try_lock_shared_until(&self, recursive: bool, timeout: Instant) -> bool {
-        let result = if self.try_lock_shared_fast(recursive) {
-            true
-        } else {
-            self.lock_shared_slow(recursive, Some(timeout))
-        };
-        if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
-        }
-        result
-    }
-
-    #[inline]
-    pub fn try_lock_shared_for(&self, recursive: bool, timeout: Duration) -> bool {
-        let result = if self.try_lock_shared_fast(recursive) {
-            true
-        } else {
-            self.lock_shared_slow(recursive, Some(Instant::now() + timeout))
-        };
-        if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
-        }
-        result
-    }
-
-    #[inline]
-    pub fn try_lock_shared(&self, recursive: bool) -> bool {
-        let result = if self.try_lock_shared_fast(recursive) {
-            true
-        } else {
-            self.try_lock_shared_slow(recursive)
-        };
-        if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
-        }
-        result
-    }
-
-    #[inline]
-    pub fn unlock_shared(&self, force_fair: bool) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
+    #[inline(always)]
+    fn try_lock_upgradable_fast(&self) -> bool {
         let state = self.state.load(Ordering::Relaxed);
-        if state & PARKED_BIT == 0 || state & SHARED_COUNT_MASK != SHARED_COUNT_INC {
-            if have_elision() {
-                if self.state
-                    .elision_release(state, state - SHARED_COUNT_INC)
-                    .is_ok()
-                {
-                    return;
-                }
-            } else {
-                if self.state
-                    .compare_exchange_weak(
-                        state,
-                        state - SHARED_COUNT_INC,
-                        Ordering::Release,
-                        Ordering::Relaxed,
-                    )
-                    .is_ok()
-                {
-                    return;
-                }
-            }
+
+        // We can't allow grabbing an upgradable lock while there are parked threads
+        // since that could lead to writer starvation.
+        if state & PARKED_BIT != 0 {
+            return false;
         }
-        self.unlock_shared_slow(force_fair);
+
+        if let Some(new_state) = state.checked_add(UPGRADABLE_GUARD) {
+            self.state
+                .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
+                .is_ok()
+        } else {
+            false
+        }
     }
 
     #[cold]
@@ -241,10 +573,10 @@
         loop {
             // Grab the lock if it isn't locked, even if there are other
             // threads parked.
-            if state & (LOCKED_BIT | SHARED_COUNT_MASK) == 0 {
+            if let Some(new_state) = state.checked_add(EXCLUSIVE_GUARD) {
                 match self.state.compare_exchange_weak(
                     state,
-                    state | LOCKED_BIT,
+                    new_state,
                     Ordering::Acquire,
                     Ordering::Relaxed,
                 ) {
@@ -256,9 +588,8 @@
 
             // If there are no parked threads and only one reader or writer, try
             // spinning a few times.
-            if state & PARKED_BIT == 0 &&
-                (state & LOCKED_BIT != 0 || state & SHARED_COUNT_MASK == SHARED_COUNT_INC) &&
-                spinwait.spin()
+            if (state == EXCLUSIVE_GUARD || state == SHARED_GUARD || state == UPGRADABLE_GUARD)
+                && spinwait.spin()
             {
                 state = self.state.load(Ordering::Relaxed);
                 continue;
@@ -272,7 +603,7 @@
                     loop {
                         // If the rwlock is free, abort the park and try to grab
                         // it immediately.
-                        if state & (LOCKED_BIT | SHARED_COUNT_MASK) == 0 {
+                        if state & GUARD_COUNT_MASK == 0 {
                             return false;
                         }
 
@@ -333,64 +664,73 @@
     #[inline(never)]
     fn unlock_exclusive_slow(&self, force_fair: bool) {
         // Unlock directly if there are no parked threads
-        if self.state
-            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+        if self
+            .state
+            .compare_exchange(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
-        }
+        }
 
-        // There are threads to unpark. We can unpark a single exclusive
-        // thread or many shared threads.
-        let first_token = Cell::new(None);
+        // There are threads to unpark. We unpark threads up to the guard capacity.
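+        // Each waiter parked with a ParkToken equal to the amount of guard
+        // resource it needs (SHARED_GUARD, UPGRADABLE_GUARD or
+        // EXCLUSIVE_GUARD), so the filter below simply sums tokens and
+        // stops once the next waiter would overflow the capacity.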
+        let guard_count = Cell::new(0usize);
         unsafe {
             let addr = self as *const _ as usize;
-            let filter = |token| -> FilterOp {
-                if let Some(first_token) = first_token.get() {
-                    if first_token == TOKEN_EXCLUSIVE || token == TOKEN_EXCLUSIVE {
-                        FilterOp::Stop
-                    } else {
+            let filter = |ParkToken(token)| -> FilterOp {
+                match guard_count.get().checked_add(token) {
+                    Some(new_guard_count) => {
+                        guard_count.set(new_guard_count);
                         FilterOp::Unpark
                     }
-                } else {
-                    first_token.set(Some(token));
-                    FilterOp::Unpark
+                    None => FilterOp::Stop,
                 }
             };
             let callback = |result: UnparkResult| {
                 // If we are using a fair unlock then we should keep the
                 // rwlock locked and hand it off to the unparked threads.
                 if result.unparked_threads != 0 && (force_fair || result.be_fair) {
-                    if first_token.get().unchecked_unwrap() == TOKEN_EXCLUSIVE {
-                        // If we unparked an exclusive thread, just clear the
-                        // parked bit if there are no more parked threads.
-                        if !result.have_more_threads {
-                            self.state.store(LOCKED_BIT, Ordering::Relaxed);
-                        }
-                    } else {
-                        // If we unparked shared threads then we need to set
-                        // the shared count accordingly.
-                        if result.have_more_threads {
-                            self.state.store(
-                                (result.unparked_threads << SHARED_COUNT_SHIFT) | PARKED_BIT,
-                                Ordering::Release,
-                            );
-                        } else {
-                            self.state.store(
-                                result.unparked_threads << SHARED_COUNT_SHIFT,
-                                Ordering::Release,
-                            );
-                        }
-                    }
-                    return TOKEN_HANDOFF;
-                }
+                    // We need to set the guard count accordingly.
+                    let mut new_state = guard_count.get();
 
-                // Clear the locked bit, and the parked bit as well if there
-                // are no more parked threads.
-                if result.have_more_threads {
-                    self.state.store(PARKED_BIT, Ordering::Release);
+                    if result.have_more_threads {
+                        new_state |= PARKED_BIT;
+                    }
+
+                    self.state.store(new_state, Ordering::Release);
+                    TOKEN_HANDOFF
                 } else {
-                    self.state.store(0, Ordering::Release);
+                    // Clear the parked bit if there are no more parked threads.
+                    if result.have_more_threads {
+                        self.state.store(PARKED_BIT, Ordering::Release);
+                    } else {
+                        self.state.store(0, Ordering::Release);
+                    }
+                    TOKEN_NORMAL
+                }
+            };
+            parking_lot_core::unpark_filter(addr, filter, callback);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn downgrade_slow(&self) {
+        unsafe {
+            let addr = self as *const _ as usize;
+            let mut guard_count = SHARED_GUARD;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match guard_count.checked_add(token) {
+                    Some(new_guard_count) => {
+                        guard_count = new_guard_count;
+                        FilterOp::Unpark
+                    }
+                    None => FilterOp::Stop,
+                }
+            };
+            let callback = |result: UnparkResult| {
+                // Clear the parked bit if there are no more parked threads
+                if !result.have_more_threads {
+                    self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
                 }
                 TOKEN_NORMAL
             };
@@ -400,15 +740,17 @@
 
     #[cold]
     #[inline(never)]
-    fn downgrade_slow(&self) {
-        // Unpark shared threads only
+    fn downgrade_to_upgradable_slow(&self) {
         unsafe {
             let addr = self as *const _ as usize;
-            let filter = |token| -> FilterOp {
-                if token == TOKEN_SHARED {
-                    FilterOp::Unpark
-                } else {
-                    FilterOp::Stop
+            let mut guard_count = UPGRADABLE_GUARD;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match guard_count.checked_add(token) {
+                    Some(new_guard_count) => {
+                        guard_count = new_guard_count;
+                        FilterOp::Unpark
+                    }
+                    None => FilterOp::Stop,
                 }
             };
             let callback = |result: UnparkResult| {
@@ -434,7 +776,7 @@
             // readers try to acquire the lock. We only do this if the lock is
             // completely empty since elision handles conflicts poorly.
             if have_elision() && state == 0 {
-                match self.state.elision_acquire(0, SHARED_COUNT_INC) {
+                match self.state.elision_acquire(0, SHARED_GUARD) {
                     Ok(_) => return true,
                     Err(x) => state = x,
                 }
@@ -443,23 +785,31 @@
             // Grab the lock if there are no exclusive threads locked or
             // waiting. However if we were unparked then we are allowed to grab
             // the lock even if there are pending exclusive threads.
-            if state & LOCKED_BIT == 0 && (unparked || recursive || state & PARKED_BIT == 0) {
-                let new = state.checked_add(SHARED_COUNT_INC).expect(
-                    "RwLock shared count overflow",
-                );
-                if self.state
-                    .compare_exchange_weak(state, new, Ordering::Acquire, Ordering::Relaxed)
-                    .is_ok()
-                {
-                    return true;
-                }
+            if unparked || recursive || state & PARKED_BIT == 0 {
+                if let Some(new_state) = state.checked_add(SHARED_GUARD) {
+                    if self
+                        .state
+                        .compare_exchange_weak(
+                            state,
+                            new_state,
+                            Ordering::Acquire,
+                            Ordering::Relaxed,
+                        )
+                        .is_ok()
+                    {
+                        return true;
+                    }
 
-                // If there is high contention on the reader count then we want
-                // to leave some time between attempts to acquire the lock to
-                // let other threads make progress.
-                spinwait_shared.spin_no_yield();
-                state = self.state.load(Ordering::Relaxed);
-                continue;
+                    // If there is high contention on the reader count then we want
+                    // to leave some time between attempts to acquire the lock to
+                    // let other threads make progress.
+                    spinwait_shared.spin_no_yield();
+                    state = self.state.load(Ordering::Relaxed);
+                    continue;
+                } else {
+                    // We were unparked spuriously; reset the unparked flag.
+                    unparked = false;
+                }
             }
 
             // If there are no parked threads, try spinning a few times
@@ -480,10 +830,10 @@
                         }
 
                         // If the parked bit is not set then it means we are at
-                        // the front of the queue. If there is no exclusive lock
-                        // then we should abort the park and try acquiring the
-                        // lock again.
-                        if state & LOCKED_BIT == 0 {
+                        // the front of the queue. If there is space for another
+                        // lock then we should abort the park and try acquiring
+                        // the lock again.
+                        if state & GUARD_COUNT_MASK != GUARD_COUNT_MASK {
                             return false;
                         }
 
@@ -542,31 +892,26 @@
     fn try_lock_shared_slow(&self, recursive: bool) -> bool {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
-            let mask = if recursive {
-                LOCKED_BIT
-            } else {
-                LOCKED_BIT | PARKED_BIT
-            };
-            if state & mask != 0 {
+            if !recursive && state & PARKED_BIT != 0 {
                 return false;
             }
             if have_elision() && state == 0 {
-                match self.state.elision_acquire(0, SHARED_COUNT_INC) {
+                match self.state.elision_acquire(0, SHARED_GUARD) {
                     Ok(_) => return true,
                     Err(x) => state = x,
                 }
             } else {
-                let new = state.checked_add(SHARED_COUNT_INC).expect(
-                    "RwLock shared count overflow",
-                );
-                match self.state.compare_exchange_weak(
-                    state,
-                    new,
-                    Ordering::Acquire,
-                    Ordering::Relaxed,
-                ) {
-                    Ok(_) => return true,
-                    Err(x) => state = x,
+                match state.checked_add(SHARED_GUARD) {
+                    Some(new_state) => match self.state.compare_exchange_weak(
+                        state,
+                        new_state,
+                        Ordering::Acquire,
+                        Ordering::Relaxed,
+                    ) {
+                        Ok(_) => return true,
+                        Err(x) => state = x,
+                    },
+                    None => return false,
                 }
             }
         }
@@ -579,10 +924,14 @@
         loop {
             // Just release the lock if there are no parked threads or if we are
             // not the last shared thread.
-            if state & PARKED_BIT == 0 || state & SHARED_COUNT_MASK != SHARED_COUNT_INC {
+            if state & PARKED_BIT == 0
+                || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD)
+                || (state & UPGRADING_BIT != 0
+                    && state & GUARD_COUNT_MASK != UPGRADABLE_GUARD + SHARED_GUARD)
+            {
                 match self.state.compare_exchange_weak(
                     state,
-                    state - SHARED_COUNT_INC,
+                    state - SHARED_GUARD,
                     Ordering::Release,
                     Ordering::Relaxed,
                 ) {
@@ -592,67 +941,462 @@
                 continue;
             }
 
-            // There are threads to unpark. We can unpark a single exclusive
-            // thread or many shared threads. Note that there is a potential
-            // race condition here: another thread might grab a shared lock
-            // between now and when we actually release our lock.
-            let first_token = Cell::new(None);
-            unsafe {
-                let addr = self as *const _ as usize;
-                let filter = |token| -> FilterOp {
-                    if let Some(first_token) = first_token.get() {
-                        if first_token == TOKEN_EXCLUSIVE || token == TOKEN_EXCLUSIVE {
-                            FilterOp::Stop
-                        } else {
+            break;
+        }
+
+        // There are threads to unpark. If there is a thread waiting to be
+        // upgraded, we find that thread and let it upgrade; otherwise we
+        // unpark threads up to the guard capacity. Note that there is a
+        // potential race condition here: another thread might grab a shared
+        // lock between now and when we actually release our lock.
+        let additional_guards = Cell::new(0usize);
+        let has_upgraded = Cell::new(if state & UPGRADING_BIT == 0 {
+            None
+        } else {
+            Some(false)
+        });
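+        // has_upgraded is a small state machine driving the filter below:
+        //   None        - no thread is waiting to upgrade; unpark waiters
+        //                 up to the guard capacity as usual.
+        //   Some(false) - an upgrading thread is parked somewhere in the
+        //                 queue; skip everything else until we find it.
+        //   Some(true)  - found it; stop scanning the queue.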
+        unsafe {
+            let addr = self as *const _ as usize;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match has_upgraded.get() {
+                    None => match additional_guards.get().checked_add(token) {
+                        Some(x) => {
+                            additional_guards.set(x);
                             FilterOp::Unpark
                         }
-                    } else {
-                        first_token.set(Some(token));
+                        None => FilterOp::Stop,
+                    },
+                    Some(false) => if token & UPGRADING_BIT != 0 {
+                        additional_guards.set(token & !UPGRADING_BIT);
+                        has_upgraded.set(Some(true));
                         FilterOp::Unpark
+                    } else {
+                        FilterOp::Skip
+                    },
+                    Some(true) => FilterOp::Stop,
+                }
+            };
+            let callback = |result: UnparkResult| {
+                let mut state = self.state.load(Ordering::Relaxed);
+                loop {
+                    // Release our shared lock
+                    let mut new_state = state - SHARED_GUARD;
+
+                    // Clear the parked bit if there are no more threads in
+                    // the queue.
+                    if !result.have_more_threads {
+                        new_state &= !PARKED_BIT;
                     }
-                };
-                let callback = |result: UnparkResult| {
+
+                    // Clear the upgrading bit if we are upgrading a thread.
+                    if let Some(true) = has_upgraded.get() {
+                        new_state &= !UPGRADING_BIT;
+                    }
+
+                    // Consider using fair unlocking. If we are, then we should set
+                    // the state to the new value and tell the threads that we are
+                    // handing the lock off to them directly.
+                    let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+                        match new_state.checked_add(additional_guards.get()) {
+                            Some(x) => {
+                                new_state = x;
+                                TOKEN_HANDOFF
+                            }
+                            None => TOKEN_NORMAL,
+                        }
+                    } else {
+                        TOKEN_NORMAL
+                    };
+
+                    match self.state.compare_exchange_weak(
+                        state,
+                        new_state,
+                        Ordering::Release,
+                        Ordering::Relaxed,
+                    ) {
+                        Ok(_) => return token,
+                        Err(x) => state = x,
+                    }
+                }
+            };
+            parking_lot_core::unpark_filter(addr, filter, callback);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn lock_upgradable_slow(&self, timeout: Option<Instant>) -> bool {
+        let mut spinwait = SpinWait::new();
+        let mut spinwait_shared = SpinWait::new();
+        let mut state = self.state.load(Ordering::Relaxed);
+        let mut unparked = false;
+        loop {
+            // Grab the lock if there are no exclusive or upgradable threads
+            // locked or waiting. However if we were unparked then we are
+            // allowed to grab the lock even if there are pending exclusive threads.
+            if unparked || state & PARKED_BIT == 0 {
+                if let Some(new_state) = state.checked_add(UPGRADABLE_GUARD) {
+                    if self
+                        .state
+                        .compare_exchange_weak(
+                            state,
+                            new_state,
+                            Ordering::Acquire,
+                            Ordering::Relaxed,
+                        )
+                        .is_ok()
+                    {
+                        return true;
+                    }
+
+                    // If there is high contention on the reader count then we want
+                    // to leave some time between attempts to acquire the lock to
+                    // let other threads make progress.
+                    spinwait_shared.spin_no_yield();
+                    state = self.state.load(Ordering::Relaxed);
+                    continue;
+                } else {
+                    // We were unparked spuriously, reset unparked flag.
+                    unparked = false;
+                }
+            }
+
+            // If there are no parked threads, try spinning a few times
+            if state & PARKED_BIT == 0 && spinwait.spin() {
+                state = self.state.load(Ordering::Relaxed);
+                continue;
+            }
+
+            // Park our thread until we are woken up by an unlock
+            unsafe {
+                let addr = self as *const _ as usize;
+                let validate = || {
                     let mut state = self.state.load(Ordering::Relaxed);
                     loop {
-                        // Release our shared lock
-                        let mut new = state - SHARED_COUNT_INC;
-
-                        // Clear the parked bit if there are no more threads in
-                        // the queue
-                        if !result.have_more_threads {
-                            new &= !PARKED_BIT;
+                        // Nothing to do if the parked bit is already set
+                        if state & PARKED_BIT != 0 {
+                            return true;
                         }
 
-                        // If we are the last shared thread and we unparked an
-                        // exclusive thread then we can consider using fair
-                        // unlocking. If we are then we should set the exclusive
-                        // locked bit and tell the thread that we are handing it
-                        // the lock directly.
-                        let token = if result.unparked_threads != 0 &&
-                            new & SHARED_COUNT_MASK == 0 &&
-                            first_token.get().unchecked_unwrap() == TOKEN_EXCLUSIVE &&
-                            (force_fair || result.be_fair)
-                        {
-                            new |= LOCKED_BIT;
-                            TOKEN_HANDOFF
-                        } else {
-                            TOKEN_NORMAL
-                        };
+                        // If the parked bit is not set then it means we are at
+                        // the front of the queue. If there is space for an
+                        // upgradable lock then we should abort the park and try
+                        // acquiring the lock again.
+                        if state & UPGRADABLE_GUARD != UPGRADABLE_GUARD {
+                            return false;
+                        }
 
+                        // Set the parked bit
                         match self.state.compare_exchange_weak(
                             state,
-                            new,
-                            Ordering::Release,
+                            state | PARKED_BIT,
+                            Ordering::Relaxed,
                             Ordering::Relaxed,
                         ) {
-                            Ok(_) => return token,
+                            Ok(_) => return true,
                             Err(x) => state = x,
                         }
                     }
                 };
-                parking_lot_core::unpark_filter(addr, filter, callback);
+                let before_sleep = || {};
+                let timed_out = |_, was_last_thread| {
+                    // Clear the parked bit if we were the last parked thread
+                    if was_last_thread {
+                        self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+                    }
+                };
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    TOKEN_UPGRADABLE,
+                    timeout,
+                ) {
+                    // The thread that unparked us passed the lock on to us
+                    // directly without unlocking it.
+                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
+
+                    // We were unparked normally, try acquiring the lock again
+                    ParkResult::Unparked(_) => (),
+
+                    // The validation function failed, try locking again
+                    ParkResult::Invalid => (),
+
+                    // Timeout expired
+                    ParkResult::TimedOut => return false,
+                }
             }
+
+            // Loop back and try locking again
+            spinwait.reset();
+            spinwait_shared.reset();
+            state = self.state.load(Ordering::Relaxed);
+            unparked = true;
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn try_lock_upgradable_slow(&self) -> bool {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            if state & PARKED_BIT != 0 {
+                return false;
+            }
+
+            match state.checked_add(UPGRADABLE_GUARD) {
+                Some(new_state) => match self.state.compare_exchange_weak(
+                    state,
+                    new_state,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                },
+                None => return false,
+            }
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn unlock_upgradable_slow(&self, force_fair: bool) {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            // Just release the lock if there are no parked threads.
+            if state & PARKED_BIT == 0 {
+                match self.state.compare_exchange_weak(
+                    state,
+                    state - UPGRADABLE_GUARD,
+                    Ordering::Release,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return,
+                    Err(x) => state = x,
+                }
+                continue;
+            }
+
             break;
         }
+
+        // There are threads to unpark. We unpark threads up to the guard capacity.
+        let additional_guards = Cell::new(0usize);
+        unsafe {
+            let addr = self as *const _ as usize;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match additional_guards.get().checked_add(token) {
+                    Some(x) => {
+                        additional_guards.set(x);
+                        FilterOp::Unpark
+                    }
+                    None => FilterOp::Stop,
+                }
+            };
+            let callback = |result: UnparkResult| {
+                let mut state = self.state.load(Ordering::Relaxed);
+                loop {
+                    // Release our upgradable lock
+                    let mut new_state = state - UPGRADABLE_GUARD;
+
+                    // Clear the parked bit if there are no more threads in
+                    // the queue
+                    if !result.have_more_threads {
+                        new_state &= !PARKED_BIT;
+                    }
+
+                    // Consider using fair unlocking. If we are, then we should set
+                    // the state to the new value and tell the threads that we are
+                    // handing the lock off to them directly.
+                    let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+                        match new_state.checked_add(additional_guards.get()) {
+                            Some(x) => {
+                                new_state = x;
+                                TOKEN_HANDOFF
+                            }
+                            None => TOKEN_NORMAL,
+                        }
+                    } else {
+                        TOKEN_NORMAL
+                    };
+
+                    match self.state.compare_exchange_weak(
+                        state,
+                        new_state,
+                        Ordering::Release,
+                        Ordering::Relaxed,
+                    ) {
+                        Ok(_) => return token,
+                        Err(x) => state = x,
+                    }
+                }
+            };
+            parking_lot_core::unpark_filter(addr, filter, callback);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn downgrade_upgradable_slow(&self, state: usize) {
+        unsafe {
+            let addr = self as *const _ as usize;
+            let mut guard_count = (state & GUARD_COUNT_MASK) - UPGRADABLE_GUARD;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match guard_count.checked_add(token) {
+                    Some(x) => {
+                        guard_count = x;
+                        FilterOp::Unpark
+                    }
+                    None => FilterOp::Stop,
+                }
+            };
+            let callback = |result: UnparkResult| {
+                // Clear the parked bit if there are no more parked threads
+                if !result.have_more_threads {
+                    self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+                }
+                TOKEN_NORMAL
+            };
+            parking_lot_core::unpark_filter(addr, filter, callback);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn try_upgrade_slow(&self) -> bool {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            match state.checked_add(EXCLUSIVE_GUARD - SHARED_GUARD) {
+                Some(new_state) => match self.state.compare_exchange_weak(
+                    state,
+                    new_state,
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                },
+                None => return false,
+            }
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn upgrade_slow(&self, timeout: Option<Instant>) -> bool {
+        let mut spinwait = SpinWait::new();
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            // Upgrade to an exclusive lock if there is room for it, even if
+            // there are other threads parked.
+            if let Some(new_state) = state.checked_add(EXCLUSIVE_GUARD - UPGRADABLE_GUARD) {
+                match self.state.compare_exchange_weak(
+                    state,
+                    new_state,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                }
+                continue;
+            }
+
+            // If there are no parked threads and only one other reader, try
+            // spinning a few times.
+            if state == UPGRADABLE_GUARD | SHARED_GUARD && spinwait.spin() {
+                state = self.state.load(Ordering::Relaxed);
+                continue;
+            }
+
+            // Park our thread until we are woken up by an unlock
+            unsafe {
+                let addr = self as *const _ as usize;
+                let validate = || {
+                    let mut state = self.state.load(Ordering::Relaxed);
+                    loop {
+                        // If we are the only remaining lock holder, abort
+                        // the park and try to upgrade immediately.
+                        if state & GUARD_COUNT_MASK == UPGRADABLE_GUARD {
+                            return false;
+                        }
+
+                        // Set the upgrading and parked bits
+                        match self.state.compare_exchange_weak(
+                            state,
+                            state | (UPGRADING_BIT | PARKED_BIT),
+                            Ordering::Relaxed,
+                            Ordering::Relaxed,
+                        ) {
+                            Ok(_) => return true,
+                            Err(x) => state = x,
+                        }
+                    }
+                };
+                let before_sleep = || {};
+                let timed_out = |_, was_last_thread| {
+                    // Clear the upgrading bit
+                    let mut flags = UPGRADING_BIT;
+
+                    // Clear the parked bit if we were the last parked thread
+                    if was_last_thread {
+                        flags |= PARKED_BIT;
+                    }
+
+                    self.state.fetch_and(!flags, Ordering::Relaxed);
+                };
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    TOKEN_UPGRADING,
+                    timeout,
+                ) {
+                    // The thread that unparked us passed the lock on to us
+                    // directly without unlocking it.
+                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
+
+                    // We were unparked normally, try acquiring the lock again
+                    ParkResult::Unparked(_) => (),
+
+                    // The validation function failed, try locking again
+                    ParkResult::Invalid => (),
+
+                    // Timeout expired
+                    ParkResult::TimedOut => return false,
+                }
+            }
+
+            // Loop back and try locking again
+            spinwait.reset();
+            state = self.state.load(Ordering::Relaxed);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn bump_shared_slow(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        self.unlock_shared_slow(true);
+        self.lock_shared();
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn bump_exclusive_slow(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        self.unlock_exclusive_slow(true);
+        self.lock_exclusive();
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn bump_upgradable_slow(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        self.unlock_upgradable_slow(true);
+        self.lock_upgradable();
     }
 }
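
For context, the upgrade machinery above (`try_upgrade_slow`, `upgrade_slow`)
backs the new upgradable-read API. A minimal usage sketch, illustrative only,
mirroring the `upgradable_read`/`upgrade` calls that the rwlock tests later in
this patch exercise:

    extern crate parking_lot; // 0.6

    use parking_lot::{RwLock, RwLockUpgradableReadGuard};

    fn main() {
        let lock = RwLock::new(vec![1, 2, 3]);

        // Only one upgradable reader can exist at a time, but it may
        // coexist with ordinary readers.
        let guard = lock.upgradable_read();
        if guard.len() < 4 {
            // Atomically converts the guard into a write guard; under
            // contention this is the path served by `upgrade_slow` above.
            let mut writer = RwLockUpgradableReadGuard::upgrade(guard);
            writer.push(4);
        }
    }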
diff --git a/rustc_deps/vendor/parking_lot/src/remutex.rs b/rustc_deps/vendor/parking_lot/src/remutex.rs
index 816eec6..ba9e004 100644
--- a/rustc_deps/vendor/parking_lot/src/remutex.rs
+++ b/rustc_deps/vendor/parking_lot/src/remutex.rs
@@ -5,16 +5,22 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::cell::UnsafeCell;
-use std::ops::Deref;
-use std::time::{Duration, Instant};
-use std::fmt;
-use std::mem;
-use std::marker::PhantomData;
-use raw_remutex::RawReentrantMutex;
+use lock_api::{self, GetThreadId};
+use raw_mutex::RawMutex;
 
-#[cfg(feature = "owning_ref")]
-use owning_ref::StableAddress;
+/// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`.
+pub struct RawThreadId;
+
+unsafe impl GetThreadId for RawThreadId {
+    const INIT: RawThreadId = RawThreadId;
+
+    fn nonzero_thread_id(&self) -> usize {
+        // The address of a thread-local variable is guaranteed to be unique to the
+        // current thread, and is also guaranteed to be non-zero.
+        thread_local!(static KEY: u8 = unsafe { ::std::mem::uninitialized() });
+        KEY.with(|x| x as *const _ as usize)
+    }
+}
 
 /// A mutex which can be recursively locked by a single thread.
 ///
@@ -24,237 +30,28 @@
 ///   deadlocking.
 /// - `ReentrantMutexGuard` does not give mutable references to the locked data.
 ///   Use a `RefCell` if you need this.
-/// - `ReentrantMutexGuard` is not `Send`.
 ///
 /// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
 /// primitive.
-pub struct ReentrantMutex<T: ?Sized> {
-    raw: RawReentrantMutex,
-    data: UnsafeCell<T>,
-}
-
-unsafe impl<T: Send> Send for ReentrantMutex<T> {}
-unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
+pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, RawThreadId, T>;
 
 /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
 /// is dropped (falls out of scope), the lock will be unlocked.
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` implementation.
-#[must_use]
-pub struct ReentrantMutexGuard<'a, T: ?Sized + 'a> {
-    mutex: &'a ReentrantMutex<T>,
+pub type ReentrantMutexGuard<'a, T> =
+    lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
 
-    // The raw pointer here ensures that ReentrantMutexGuard is !Send
-    marker: PhantomData<(&'a T, *mut ())>,
-}
-
-unsafe impl<'a, T: ?Sized + 'a + Sync> Sync for ReentrantMutexGuard<'a, T> {}
-
-impl<T> ReentrantMutex<T> {
-    /// Creates a new reentrant mutex in an unlocked state ready for use.
-    #[cfg(feature = "nightly")]
-    #[inline]
-    pub const fn new(val: T) -> ReentrantMutex<T> {
-        ReentrantMutex {
-            data: UnsafeCell::new(val),
-            raw: RawReentrantMutex::new(),
-        }
-    }
-
-    /// Creates a new reentrant mutex in an unlocked state ready for use.
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new(val: T) -> ReentrantMutex<T> {
-        ReentrantMutex {
-            data: UnsafeCell::new(val),
-            raw: RawReentrantMutex::new(),
-        }
-    }
-
-    /// Consumes this reentrant mutex, returning the underlying data.
-    #[inline]
-    pub fn into_inner(self) -> T {
-        unsafe { self.data.into_inner() }
-    }
-}
-
-impl<T: ?Sized> ReentrantMutex<T> {
-    /// Acquires a reentrant mutex, blocking the current thread until it is able
-    /// to do so.
-    ///
-    /// If the mutex is held by another thread then this function will block the
-    /// local thread until it is available to acquire the mutex. If the mutex is
-    /// already held by the current thread then this function will increment the
-    /// lock reference count and return immediately. Upon returning,
-    /// the thread is the only thread with the mutex held. An RAII guard is
-    /// returned to allow scoped unlock of the lock. When the guard goes out of
-    /// scope, the mutex will be unlocked.
-    #[inline]
-    pub fn lock(&self) -> ReentrantMutexGuard<T> {
-        self.raw.lock();
-        ReentrantMutexGuard::new(self)
-    }
-
-    /// Attempts to acquire this lock.
-    ///
-    /// If the lock could not be acquired at this time, then `None` is returned.
-    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
-    /// guard is dropped.
-    ///
-    /// This function does not block.
-    #[inline]
-    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<T>> {
-        if self.raw.try_lock() {
-            Some(ReentrantMutexGuard::new(self))
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this lock until a timeout is reached.
-    ///
-    /// If the lock could not be acquired before the timeout expired, then
-    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
-    /// be unlocked when the guard is dropped.
-    #[inline]
-    pub fn try_lock_for(&self, timeout: Duration) -> Option<ReentrantMutexGuard<T>> {
-        if self.raw.try_lock_for(timeout) {
-            Some(ReentrantMutexGuard::new(self))
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this lock until a timeout is reached.
-    ///
-    /// If the lock could not be acquired before the timeout expired, then
-    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
-    /// be unlocked when the guard is dropped.
-    #[inline]
-    pub fn try_lock_until(&self, timeout: Instant) -> Option<ReentrantMutexGuard<T>> {
-        if self.raw.try_lock_until(timeout) {
-            Some(ReentrantMutexGuard::new(self))
-        } else {
-            None
-        }
-    }
-
-    /// Returns a mutable reference to the underlying data.
-    ///
-    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
-    /// take place---the mutable borrow statically guarantees no locks exist.
-    #[inline]
-    pub fn get_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.data.get() }
-    }
-
-    /// Releases the mutex.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the mutex was locked using
-    /// `raw_lock` or `raw_try_lock`, or if a `ReentrantMutexGuard` from this mutex was
-    /// leaked (e.g. with `mem::forget`). The mutex must be locked.
-    #[inline]
-    pub unsafe fn raw_unlock(&self) {
-        self.raw.unlock(false);
-    }
-
-    /// Releases the mutex using a fair unlock protocol.
-    ///
-    /// See `ReentrantMutexGuard::unlock_fair`.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the mutex was locked using
-    /// `raw_lock` or `raw_try_lock`, or if a `ReentrantMutexGuard` from this mutex was
-    /// leaked (e.g. with `mem::forget`). The mutex must be locked.
-    #[inline]
-    pub unsafe fn raw_unlock_fair(&self) {
-        self.raw.unlock(true);
-    }
-}
-impl ReentrantMutex<()> {
-    /// Acquires a mutex, blocking the current thread until it is able to do so.
-    ///
-    /// This is similar to `lock`, except that a `ReentrantMutexGuard` is not returned.
-    /// Instead you will need to call `raw_unlock` to release the mutex.
-    #[inline]
-    pub fn raw_lock(&self) {
-        self.raw.lock();
-    }
-
-    /// Attempts to acquire this lock.
-    ///
-    /// This is similar to `try_lock`, except that a `ReentrantMutexGuard` is not
-    /// returned. Instead you will need to call `raw_unlock` to release the
-    /// mutex.
-    #[inline]
-    pub fn raw_try_lock(&self) -> bool {
-        self.raw.try_lock()
-    }
-}
-
-impl<T: ?Sized + Default> Default for ReentrantMutex<T> {
-    #[inline]
-    fn default() -> ReentrantMutex<T> {
-        ReentrantMutex::new(Default::default())
-    }
-}
-
-impl<T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self.try_lock() {
-            Some(guard) => write!(f, "ReentrantMutex {{ data: {:?} }}", &*guard),
-            None => write!(f, "ReentrantMutex {{ <locked> }}"),
-        }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, T> {
-    fn new(mutex: &'a ReentrantMutex<T>) -> Self {
-        ReentrantMutexGuard {
-            mutex: mutex,
-            marker: PhantomData,
-        }
-    }
-    /// Unlocks the mutex using a fair unlock protocol.
-    ///
-    /// By default, mutexes are unfair and allow the current thread to re-lock
-    /// the mutex before another has the chance to acquire the lock, even if
-    /// that thread has been blocked on the mutex for a long time. This is the
-    /// default because it allows much higher throughput as it avoids forcing a
-    /// context switch on every mutex unlock. This can result in one thread
-    /// acquiring a mutex many more times than other threads.
-    ///
-    /// However in some cases it can be beneficial to ensure fairness by forcing
-    /// the lock to pass on to a waiting thread if there is one. This is done by
-    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
-    #[inline]
-    pub fn unlock_fair(self) {
-        self.mutex.raw.unlock(true);
-        mem::forget(self);
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Deref for ReentrantMutexGuard<'a, T> {
-    type Target = T;
-    #[inline]
-    fn deref(&self) -> &T {
-        unsafe { &*self.mutex.data.get() }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Drop for ReentrantMutexGuard<'a, T> {
-    #[inline]
-    fn drop(&mut self) {
-        self.mutex.raw.unlock(false);
-    }
-}
-
-#[cfg(feature = "owning_ref")]
-unsafe impl<'a, T: ?Sized> StableAddress for ReentrantMutexGuard<'a, T> {}
+/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+pub type MappedReentrantMutexGuard<'a, T> =
+    lock_api::MappedReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
 
 #[cfg(test)]
 mod tests {
@@ -310,4 +107,20 @@
             .unwrap();
         let _lock3 = m.try_lock();
     }
+
+    #[test]
+    fn test_reentrant_mutex_debug() {
+        let mutex = ReentrantMutex::new(vec![0u8, 10]);
+
+        assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }");
+        assert_eq!(
+            format!("{:#?}", mutex),
+            "ReentrantMutex {
+    data: [
+        0,
+        10
+    ]
+}"
+        );
+    }
 }
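
As the doc comment above notes, `ReentrantMutexGuard` only hands out shared
references, so mutation goes through a `RefCell`. A standalone sketch of the
aliased type in use:

    extern crate parking_lot; // 0.6

    use parking_lot::ReentrantMutex;
    use std::cell::RefCell;

    fn main() {
        let m = ReentrantMutex::new(RefCell::new(0));

        let outer = m.lock();
        // Re-locking on the same thread succeeds instead of deadlocking.
        let inner = m.lock();
        *inner.borrow_mut() += 1;
        drop(inner);
        assert_eq!(*outer.borrow(), 1);
    }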
diff --git a/rustc_deps/vendor/parking_lot/src/rwlock.rs b/rustc_deps/vendor/parking_lot/src/rwlock.rs
index f93b27d..5a1d0cb 100644
--- a/rustc_deps/vendor/parking_lot/src/rwlock.rs
+++ b/rustc_deps/vendor/parking_lot/src/rwlock.rs
@@ -5,17 +5,8 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::cell::UnsafeCell;
-use std::ops::{Deref, DerefMut};
-use std::time::{Duration, Instant};
-use std::fmt;
-use std::mem;
-use std::marker::PhantomData;
+use lock_api;
 use raw_rwlock::RawRwLock;
-use deadlock::DeadlockDetectionMarker;
-
-#[cfg(feature = "owning_ref")]
-use owning_ref::StableAddress;
 
 /// A reader-writer lock
 ///
@@ -64,7 +55,6 @@
 /// - No poisoning, the lock is released normally on panic.
 /// - Only requires 1 word of space, whereas the standard library boxes the
 ///   `RwLock` due to platform limitations.
-/// - A lock guard can be sent to another thread and unlocked there.
 /// - Can be statically constructed (requires the `const_fn` nightly feature).
 /// - Does not require any drop glue when dropped.
 /// - Inline fast path for the uncontended case.
@@ -96,583 +86,49 @@
 ///     assert_eq!(*w, 6);
 /// } // write lock is dropped here
 /// ```
-pub struct RwLock<T: ?Sized> {
-    raw: RawRwLock,
-    data: UnsafeCell<T>,
-}
-
-unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
-unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
+pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;
 
 /// RAII structure used to release the shared read access of a lock when
 /// dropped.
-#[must_use]
-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-    rwlock: &'a RwLock<T>,
-    marker: PhantomData<(&'a T, DeadlockDetectionMarker)>,
-}
+pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;
 
 /// RAII structure used to release the exclusive write access of a lock when
 /// dropped.
-#[must_use]
-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
-    rwlock: &'a RwLock<T>,
-    marker: PhantomData<(&'a mut T, DeadlockDetectionMarker)>,
-}
+pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;
 
-impl<T> RwLock<T> {
-    /// Creates a new instance of an `RwLock<T>` which is unlocked.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use parking_lot::RwLock;
-    ///
-    /// let lock = RwLock::new(5);
-    /// ```
-    #[cfg(feature = "nightly")]
-    #[inline]
-    pub const fn new(val: T) -> RwLock<T> {
-        RwLock {
-            data: UnsafeCell::new(val),
-            raw: RawRwLock::new(),
-        }
-    }
+/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;
 
-    /// Creates a new instance of an `RwLock<T>` which is unlocked.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use parking_lot::RwLock;
-    ///
-    /// let lock = RwLock::new(5);
-    /// ```
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new(val: T) -> RwLock<T> {
-        RwLock {
-            data: UnsafeCell::new(val),
-            raw: RawRwLock::new(),
-        }
-    }
+/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;
 
-    /// Consumes this `RwLock`, returning the underlying data.
-    #[inline]
-    pub fn into_inner(self) -> T {
-        unsafe { self.data.into_inner() }
-    }
-}
-
-impl<T: ?Sized> RwLock<T> {
-    /// Locks this rwlock with shared read access, blocking the current thread
-    /// until it can be acquired.
-    ///
-    /// The calling thread will be blocked until there are no more writers which
-    /// hold the lock. There may be other readers currently inside the lock when
-    /// this method returns.
-    ///
-    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
-    /// the current thread already holds one may result in a deadlock.
-    ///
-    /// Returns an RAII guard which will release this thread's shared access
-    /// once it is dropped.
-    #[inline]
-    pub fn read(&self) -> RwLockReadGuard<T> {
-        self.raw.lock_shared(false);
-        RwLockReadGuard {
-            rwlock: self,
-            marker: PhantomData,
-        }
-    }
-
-    /// Attempts to acquire this rwlock with shared read access.
-    ///
-    /// If the access could not be granted at this time, then `None` is returned.
-    /// Otherwise, an RAII guard is returned which will release the shared access
-    /// when it is dropped.
-    ///
-    /// This function does not block.
-    #[inline]
-    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
-        if self.raw.try_lock_shared(false) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this rwlock with shared read access until a timeout
-    /// is reached.
-    ///
-    /// If the access could not be granted before the timeout expires, then
-    /// `None` is returned. Otherwise, an RAII guard is returned which will
-    /// release the shared access when it is dropped.
-    #[inline]
-    pub fn try_read_for(&self, timeout: Duration) -> Option<RwLockReadGuard<T>> {
-        if self.raw.try_lock_shared_for(false, timeout) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this rwlock with shared read access until a timeout
-    /// is reached.
-    ///
-    /// If the access could not be granted before the timeout expires, then
-    /// `None` is returned. Otherwise, an RAII guard is returned which will
-    /// release the shared access when it is dropped.
-    #[inline]
-    pub fn try_read_until(&self, timeout: Instant) -> Option<RwLockReadGuard<T>> {
-        if self.raw.try_lock_shared_until(false, timeout) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Locks this rwlock with shared read access, blocking the current thread
-    /// until it can be acquired.
-    ///
-    /// The calling thread will be blocked until there are no more writers which
-    /// hold the lock. There may be other readers currently inside the lock when
-    /// this method returns.
-    ///
-    /// Unlike `read`, this method is guaranteed to succeed without blocking if
-    /// another read lock is held at the time of the call. This allows a thread
-    /// to recursively lock a `RwLock`. However using this method can cause
-    /// writers to starve since readers no longer block if a writer is waiting
-    /// for the lock.
-    ///
-    /// Returns an RAII guard which will release this thread's shared access
-    /// once it is dropped.
-    #[inline]
-    pub fn read_recursive(&self) -> RwLockReadGuard<T> {
-        self.raw.lock_shared(true);
-        RwLockReadGuard {
-            rwlock: self,
-            marker: PhantomData,
-        }
-    }
-
-    /// Attempts to acquire this rwlock with shared read access.
-    ///
-    /// If the access could not be granted at this time, then `None` is returned.
-    /// Otherwise, an RAII guard is returned which will release the shared access
-    /// when it is dropped.
-    ///
-    /// This method is guaranteed to succeed if another read lock is held at the
-    /// time of the call. See the documentation for `read_recursive` for details.
-    ///
-    /// This function does not block.
-    #[inline]
-    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<T>> {
-        if self.raw.try_lock_shared(true) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this rwlock with shared read access until a timeout
-    /// is reached.
-    ///
-    /// If the access could not be granted before the timeout expires, then
-    /// `None` is returned. Otherwise, an RAII guard is returned which will
-    /// release the shared access when it is dropped.
-    ///
-    /// This method is guaranteed to succeed without blocking if another read
-    /// lock is held at the time of the call. See the documentation for
-    /// `read_recursive` for details.
-    #[inline]
-    pub fn try_read_recursive_for(&self, timeout: Duration) -> Option<RwLockReadGuard<T>> {
-        if self.raw.try_lock_shared_for(true, timeout) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this rwlock with shared read access until a timeout
-    /// is reached.
-    ///
-    /// If the access could not be granted before the timeout expires, then
-    /// `None` is returned. Otherwise, an RAII guard is returned which will
-    /// release the shared access when it is dropped.
-    #[inline]
-    pub fn try_read_recursive_until(&self, timeout: Instant) -> Option<RwLockReadGuard<T>> {
-        if self.raw.try_lock_shared_until(true, timeout) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Locks this rwlock with exclusive write access, blocking the current
-    /// thread until it can be acquired.
-    ///
-    /// This function will not return while other writers or other readers
-    /// currently have access to the lock.
-    ///
-    /// Returns an RAII guard which will drop the write access of this rwlock
-    /// when dropped.
-    #[inline]
-    pub fn write(&self) -> RwLockWriteGuard<T> {
-        self.raw.lock_exclusive();
-        RwLockWriteGuard {
-            rwlock: self,
-            marker: PhantomData,
-        }
-    }
-
-    /// Attempts to lock this rwlock with exclusive write access.
-    ///
-    /// If the lock could not be acquired at this time, then `None` is returned.
-    /// Otherwise, an RAII guard is returned which will release the lock when
-    /// it is dropped.
-    ///
-    /// This function does not block.
-    #[inline]
-    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
-        if self.raw.try_lock_exclusive() {
-            Some(RwLockWriteGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this rwlock with exclusive write access until a
-    /// timeout is reached.
-    ///
-    /// If the access could not be granted before the timeout expires, then
-    /// `None` is returned. Otherwise, an RAII guard is returned which will
-    /// release the exclusive access when it is dropped.
-    #[inline]
-    pub fn try_write_for(&self, timeout: Duration) -> Option<RwLockWriteGuard<T>> {
-        if self.raw.try_lock_exclusive_for(timeout) {
-            Some(RwLockWriteGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this rwlock with exclusive write access until a
-    /// timeout is reached.
-    ///
-    /// If the access could not be granted before the timeout expires, then
-    /// `None` is returned. Otherwise, an RAII guard is returned which will
-    /// release the exclusive access when it is dropped.
-    #[inline]
-    pub fn try_write_until(&self, timeout: Instant) -> Option<RwLockWriteGuard<T>> {
-        if self.raw.try_lock_exclusive_until(timeout) {
-            Some(RwLockWriteGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Returns a mutable reference to the underlying data.
-    ///
-    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
-    /// take place---the mutable borrow statically guarantees no locks exist.
-    #[inline]
-    pub fn get_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.data.get() }
-    }
-
-
-    /// Releases shared read access of the rwlock.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the rwlock was locked using
-    /// `raw_read` or `raw_try_read`, or if an `RwLockReadGuard` from this
-    /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
-    /// with shared read access.
-    #[inline]
-    pub unsafe fn raw_unlock_read(&self) {
-        self.raw.unlock_shared(false);
-    }
-
-    /// Releases exclusive write access of the rwlock.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the rwlock was locked using
-    /// `raw_write` or `raw_try_write`, or if an `RwLockWriteGuard` from this
-    /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
-    /// with exclusive write access.
-    #[inline]
-    pub unsafe fn raw_unlock_write(&self) {
-        self.raw.unlock_exclusive(false);
-    }
-
-    /// Releases shared read access of the rwlock using a fair unlock protocol.
-    ///
-    /// See `RwLockReadGuard::unlock_fair`.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the rwlock was locked using
-    /// `raw_read` or `raw_try_read`, or if an `RwLockReadGuard` from this
-    /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
-    /// with shared read access.
-    #[inline]
-    pub unsafe fn raw_unlock_read_fair(&self) {
-        self.raw.unlock_shared(true);
-    }
-
-    /// Releases exclusive write access of the rwlock using a fair unlock
-    /// protocol.
-    ///
-    /// See `RwLockWriteGuard::unlock_fair`.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the rwlock was locked using
-    /// `raw_write` or `raw_try_write`, or if an `RwLockWriteGuard` from this
-    /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
-    /// with exclusive write access.
-    #[inline]
-    pub unsafe fn raw_unlock_write_fair(&self) {
-        self.raw.unlock_exclusive(true);
-    }
-
-    /// Atomically downgrades a write lock into a read lock without allowing any
-    /// writers to take exclusive access of the lock in the meantime.
-    ///
-    /// See `RwLockWriteGuard::downgrade`.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the rwlock was locked using
-    /// `raw_write` or `raw_try_write`, or if an `RwLockWriteGuard` from this
-    /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
-    /// with exclusive write access.
-    #[inline]
-    pub unsafe fn raw_downgrade(&self) {
-        self.raw.downgrade();
-    }
-}
-
-impl RwLock<()> {
-    /// Locks this rwlock with shared read access, blocking the current thread
-    /// until it can be acquired.
-    ///
-    /// This is similar to `read`, except that a `RwLockReadGuard` is not
-    /// returned. Instead you will need to call `raw_unlock` to release the
-    /// rwlock.
-    #[inline]
-    pub fn raw_read(&self) {
-        self.raw.lock_shared(false);
-    }
-
-    /// Attempts to acquire this rwlock with shared read access.
-    ///
-    /// This is similar to `try_read`, except that a `RwLockReadGuard` is not
-    /// returned. Instead you will need to call `raw_unlock` to release the
-    /// rwlock.
-    #[inline]
-    pub fn raw_try_read(&self) -> bool {
-        self.raw.try_lock_shared(false)
-    }
-
-    /// Locks this rwlock with shared read access, blocking the current thread
-    /// until it can be acquired.
-    ///
-    /// This is similar to `read_recursive`, except that a `RwLockReadGuard` is
-    /// not returned. Instead you will need to call `raw_unlock` to release the
-    /// rwlock.
-    #[inline]
-    pub fn raw_read_recursive(&self) {
-        self.raw.lock_shared(true);
-    }
-
-    /// Attempts to acquire this rwlock with shared read access.
-    ///
-    /// This is similar to `try_read_recursive`, except that a `RwLockReadGuard` is not
-    /// returned. Instead you will need to call `raw_unlock` to release the
-    /// rwlock.
-    #[inline]
-    pub fn raw_try_read_recursive(&self) -> bool {
-        self.raw.try_lock_shared(true)
-    }
-
-    /// Locks this rwlock with exclusive write access, blocking the current
-    /// thread until it can be acquired.
-    ///
-    /// This is similar to `write`, except that a `RwLockReadGuard` is not
-    /// returned. Instead you will need to call `raw_unlock` to release the
-    /// rwlock.
-    #[inline]
-    pub fn raw_write(&self) {
-        self.raw.lock_exclusive();
-    }
-
-    /// Attempts to lock this rwlock with exclusive write access.
-    ///
-    /// This is similar to `try_write`, except that a `RwLockReadGuard` is not
-    /// returned. Instead you will need to call `raw_unlock` to release the
-    /// rwlock.
-    #[inline]
-    pub fn raw_try_write(&self) -> bool {
-        self.raw.try_lock_exclusive()
-    }
-}
-
-impl<T: ?Sized + Default> Default for RwLock<T> {
-    #[inline]
-    fn default() -> RwLock<T> {
-        RwLock::new(Default::default())
-    }
-}
-
-impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self.try_read() {
-            Some(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
-            None => write!(f, "RwLock {{ <locked> }}"),
-        }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> RwLockReadGuard<'a, T> {
-    /// Unlocks the `RwLock` using a fair unlock protocol.
-    ///
-    /// By default, `RwLock` is unfair and allow the current thread to re-lock
-    /// the rwlock before another has the chance to acquire the lock, even if
-    /// that thread has been blocked on the `RwLock` for a long time. This is
-    /// the default because it allows much higher throughput as it avoids
-    /// forcing a context switch on every rwlock unlock. This can result in one
-    /// thread acquiring a `RwLock` many more times than other threads.
-    ///
-    /// However in some cases it can be beneficial to ensure fairness by forcing
-    /// the lock to pass on to a waiting thread if there is one. This is done by
-    /// using this method instead of dropping the `RwLockReadGuard` normally.
-    #[inline]
-    pub fn unlock_fair(self) {
-        self.rwlock.raw.unlock_shared(true);
-        mem::forget(self);
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, T> {
-    type Target = T;
-    #[inline]
-    fn deref(&self) -> &T {
-        unsafe { &*self.rwlock.data.get() }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, T> {
-    #[inline]
-    fn drop(&mut self) {
-        self.rwlock.raw.unlock_shared(false);
-    }
-}
-
-#[cfg(feature = "owning_ref")]
-unsafe impl<'a, T: ?Sized> StableAddress for RwLockReadGuard<'a, T> {}
-
-impl<'a, T: ?Sized + 'a> RwLockWriteGuard<'a, T> {
-    /// Atomically downgrades a write lock into a read lock without allowing any
-    /// writers to take exclusive access of the lock in the meantime.
-    ///
-    /// Note that if there are any writers currently waiting to take the lock
-    /// then other readers may not be able to acquire the lock even if it was
-    /// downgraded.
-    pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
-        self.rwlock.raw.downgrade();
-        let rwlock = self.rwlock;
-        mem::forget(self);
-        RwLockReadGuard {
-            rwlock: rwlock,
-            marker: PhantomData,
-        }
-    }
-
-    /// Unlocks the `RwLock` using a fair unlock protocol.
-    ///
-    /// By default, `RwLock` is unfair and allow the current thread to re-lock
-    /// the rwlock before another has the chance to acquire the lock, even if
-    /// that thread has been blocked on the `RwLock` for a long time. This is
-    /// the default because it allows much higher throughput as it avoids
-    /// forcing a context switch on every rwlock unlock. This can result in one
-    /// thread acquiring a `RwLock` many more times than other threads.
-    ///
-    /// However in some cases it can be beneficial to ensure fairness by forcing
-    /// the lock to pass on to a waiting thread if there is one. This is done by
-    /// using this method instead of dropping the `RwLockWriteGuard` normally.
-    #[inline]
-    pub fn unlock_fair(self) {
-        self.rwlock.raw.unlock_exclusive(true);
-        mem::forget(self);
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, T> {
-    type Target = T;
-    #[inline]
-    fn deref(&self) -> &T {
-        unsafe { &*self.rwlock.data.get() }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, T> {
-    #[inline]
-    fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.rwlock.data.get() }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, T> {
-    #[inline]
-    fn drop(&mut self) {
-        self.rwlock.raw.unlock_exclusive(false);
-    }
-}
-
-#[cfg(feature = "owning_ref")]
-unsafe impl<'a, T: ?Sized> StableAddress for RwLockWriteGuard<'a, T> {}
+/// RAII structure used to release the upgradable read access of a lock when
+/// dropped.
+pub type RwLockUpgradableReadGuard<'a, T> =
+    lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
 
 #[cfg(test)]
 mod tests {
     extern crate rand;
     use self::rand::Rng;
-    use std::sync::mpsc::channel;
-    use std::thread;
-    use std::sync::Arc;
     use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
     use std::time::Duration;
-    use RwLock;
+    use {RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
 
     #[derive(Eq, PartialEq, Debug)]
     struct NonCopy(i32);
@@ -682,7 +138,9 @@
         let l = RwLock::new(());
         drop(l.read());
         drop(l.write());
+        drop(l.upgradable_read());
         drop((l.read(), l.read()));
+        drop((l.read(), l.upgradable_read()));
         drop(l.write());
     }
 
@@ -700,7 +158,7 @@
             thread::spawn(move || {
                 let mut rng = rand::thread_rng();
                 for _ in 0..M {
-                    if rng.gen_weighted_bool(N) {
+                    if rng.gen_bool(1.0 / N as f64) {
                         drop(r.write());
                     } else {
                         drop(r.read());
@@ -748,6 +206,7 @@
         let lock = arc.read();
         assert_eq!(*lock, 1);
     }
+
     #[test]
     fn test_rw_arc_no_poison_rw() {
         let arc = Arc::new(RwLock::new(1));
@@ -761,6 +220,62 @@
     }
 
     #[test]
+    fn test_ruw_arc() {
+        let arc = Arc::new(RwLock::new(0));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+
+        thread::spawn(move || {
+            for _ in 0..10 {
+                let mut lock = arc2.write();
+                let tmp = *lock;
+                *lock = -1;
+                thread::yield_now();
+                *lock = tmp + 1;
+            }
+            tx.send(()).unwrap();
+        });
+
+        let mut children = Vec::new();
+
+        // Upgradable readers try to catch the writer in the act and also
+        // try to touch the value
+        for _ in 0..5 {
+            let arc3 = arc.clone();
+            children.push(thread::spawn(move || {
+                let lock = arc3.upgradable_read();
+                let tmp = *lock;
+                assert!(tmp >= 0);
+                thread::yield_now();
+                let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
+                assert_eq!(tmp, *lock);
+                *lock = -1;
+                thread::yield_now();
+                *lock = tmp + 1;
+            }));
+        }
+
+        // Readers try to catch the writers in the act
+        for _ in 0..5 {
+            let arc4 = arc.clone();
+            children.push(thread::spawn(move || {
+                let lock = arc4.read();
+                assert!(*lock >= 0);
+            }));
+        }
+
+        // Wait for children to pass their asserts
+        for r in children {
+            assert!(r.join().is_ok());
+        }
+
+        // Wait for writer to finish
+        rx.recv().unwrap();
+        let lock = arc.read();
+        assert_eq!(*lock, 15);
+    }
+
+    #[test]
     fn test_rw_arc() {
         let arc = Arc::new(RwLock::new(0));
         let arc2 = arc.clone();
@@ -832,22 +347,117 @@
     }
 
     #[test]
+    fn test_rwlock_try_read() {
+        let lock = RwLock::new(0isize);
+        {
+            let read_guard = lock.read();
+
+            let read_result = lock.try_read();
+            assert!(
+                read_result.is_some(),
+                "try_read should succeed while read_guard is in scope"
+            );
+
+            drop(read_guard);
+        }
+        {
+            let upgrade_guard = lock.upgradable_read();
+
+            let read_result = lock.try_read();
+            assert!(
+                read_result.is_some(),
+                "try_read should succeed while upgrade_guard is in scope"
+            );
+
+            drop(upgrade_guard);
+        }
+        {
+            let write_guard = lock.write();
+
+            let read_result = lock.try_read();
+            assert!(
+                read_result.is_none(),
+                "try_read should fail while write_guard is in scope"
+            );
+
+            drop(write_guard);
+        }
+    }
+
+    #[test]
     fn test_rwlock_try_write() {
         let lock = RwLock::new(0isize);
-        let read_guard = lock.read();
+        {
+            let read_guard = lock.read();
 
-        let write_result = lock.try_write();
-        match write_result {
-            None => (),
-            Some(_) => {
-                assert!(
-                    false,
-                    "try_write should not succeed while read_guard is in scope"
-                )
-            }
+            let write_result = lock.try_write();
+            assert!(
+                write_result.is_none(),
+                "try_write should fail while read_guard is in scope"
+            );
+
+            drop(read_guard);
         }
+        {
+            let upgrade_guard = lock.upgradable_read();
 
-        drop(read_guard);
+            let write_result = lock.try_write();
+            assert!(
+                write_result.is_none(),
+                "try_write should fail while upgrade_guard is in scope"
+            );
+
+            drop(upgrade_guard);
+        }
+        {
+            let write_guard = lock.write();
+
+            let write_result = lock.try_write();
+            assert!(
+                write_result.is_none(),
+                "try_write should fail while write_guard is in scope"
+            );
+
+            drop(write_guard);
+        }
+    }
+
+    #[test]
+    fn test_rwlock_try_upgrade() {
+        let lock = RwLock::new(0isize);
+        {
+            let read_guard = lock.read();
+
+            let upgrade_result = lock.try_upgradable_read();
+            assert!(
+                upgrade_result.is_some(),
+                "try_upgradable_read should succeed while read_guard is in scope"
+            );
+
+            drop(read_guard);
+        }
+        {
+            let upgrade_guard = lock.upgradable_read();
+
+            let upgrade_result = lock.try_upgradable_read();
+            assert!(
+                upgrade_result.is_none(),
+                "try_upgradable_read should fail while upgrade_guard is in scope"
+            );
+
+            drop(upgrade_guard);
+        }
+        {
+            let write_guard = lock.write();
+
+            let upgrade_result = lock.try_upgradable_read();
+            assert!(
+                upgrade_result.is_none(),
+                "try_upgradable_read should fail while write_guard is in scope"
+            );
+
+            drop(write_guard);
+        }
     }
 
     #[test]
@@ -881,16 +491,6 @@
         assert_eq!(m.into_inner(), NonCopy(20));
     }
 
-    #[cfg(not(feature = "deadlock_detection"))]
-    #[test]
-    fn test_rwlockguard_send() {
-        fn send<T: Send>(_: T) {}
-
-        let rwlock = RwLock::new(());
-        send(rwlock.read());
-        send(rwlock.write());
-    }
-
     #[test]
     fn test_rwlockguard_sync() {
         fn sync<T: Sync>(_: T) {}
@@ -906,12 +506,14 @@
         let mut handles = Vec::new();
         for _ in 0..8 {
             let x = x.clone();
-            handles.push(thread::spawn(move || for _ in 0..100 {
-                let mut writer = x.write();
-                *writer += 1;
-                let cur_val = *writer;
-                let reader = writer.downgrade();
-                assert_eq!(cur_val, *reader);
+            handles.push(thread::spawn(move || {
+                for _ in 0..100 {
+                    let mut writer = x.write();
+                    *writer += 1;
+                    let cur_val = *writer;
+                    let reader = RwLockWriteGuard::downgrade(writer);
+                    assert_eq!(cur_val, *reader);
+                }
             }));
         }
         for handle in handles {
@@ -925,10 +527,38 @@
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _lock1 = arc.read();
-        thread::spawn(move || { let _lock = arc2.write(); });
+        thread::spawn(move || {
+            let _lock = arc2.write();
+        });
         thread::sleep(Duration::from_millis(100));
 
         // A normal read would block here since there is a pending writer
         let _lock2 = arc.read_recursive();
     }
+
+    #[test]
+    fn test_rwlock_debug() {
+        let x = RwLock::new(vec![0u8, 10]);
+
+        assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
+        assert_eq!(
+            format!("{:#?}", x),
+            "RwLock {
+    data: [
+        0,
+        10
+    ]
+}"
+        );
+        let _lock = x.write();
+        assert_eq!(format!("{:?}", x), "RwLock { <locked> }");
+    }
+
+    #[test]
+    fn test_clone() {
+        let rwlock = RwLock::new(Arc::new(1));
+        let a = rwlock.read_recursive();
+        let b = a.clone();
+        assert_eq!(Arc::strong_count(&b), 2);
+    }
 }
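
The new `Mapped*` guard aliases above come from lock_api: `RwLockReadGuard::map`
narrows a held guard to a subfield without releasing the lock. A minimal
sketch, where the `Config` type is made up for illustration:

    extern crate parking_lot; // 0.6

    use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};

    struct Config {
        name: String,
        retries: u32,
    }

    // The mapped guard keeps the read lock held but exposes only `name`.
    fn name_of(lock: &RwLock<Config>) -> MappedRwLockReadGuard<str> {
        RwLockReadGuard::map(lock.read(), |c| c.name.as_str())
    }

    fn main() {
        let lock = RwLock::new(Config {
            name: "demo".into(),
            retries: 3,
        });
        assert_eq!(&*name_of(&lock), "demo");
        assert_eq!(lock.read().retries, 3);
    }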
diff --git a/rustc_deps/vendor/parking_lot/src/stable.rs b/rustc_deps/vendor/parking_lot/src/stable.rs
deleted file mode 100644
index 94e68c6..0000000
--- a/rustc_deps/vendor/parking_lot/src/stable.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2016 Amanieu d'Antras
-//
-// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
-// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
-// http://opensource.org/licenses/MIT>, at your option. This file may not be
-// copied, modified, or distributed except according to those terms.
-
-#![allow(dead_code)]
-
-use std::sync::atomic;
-
-// Re-export this for convenience
-pub use std::sync::atomic::{Ordering, fence};
-
-// Wrapper around AtomicUsize for non-nightly which has usable compare_exchange
-// and compare_exchange_weak methods.
-pub struct AtomicUsize(atomic::AtomicUsize);
-pub use self::AtomicUsize as AtomicU8;
-
-// Constants for static initialization
-pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize(atomic::ATOMIC_USIZE_INIT);
-pub use self::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
-
-impl AtomicUsize {
-    #[inline]
-    pub fn new(val: usize) -> AtomicUsize {
-        AtomicUsize(atomic::AtomicUsize::new(val))
-    }
-    #[inline]
-    pub fn load(&self, order: Ordering) -> usize {
-        self.0.load(order)
-    }
-    #[inline]
-    pub fn store(&self, val: usize, order: Ordering) {
-        self.0.store(val, order);
-    }
-    #[inline]
-    pub fn swap(&self, val: usize, order: Ordering) -> usize {
-        self.0.swap(val, order)
-    }
-    #[inline]
-    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
-        self.0.fetch_add(val, order)
-    }
-    #[inline]
-    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
-        self.0.fetch_sub(val, order)
-    }
-    #[inline]
-    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
-        self.0.fetch_and(val, order)
-    }
-    #[inline]
-    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
-        self.0.fetch_or(val, order)
-    }
-    #[inline]
-    pub fn compare_exchange(
-        &self,
-        old: usize,
-        new: usize,
-        order: Ordering,
-        _: Ordering,
-    ) -> Result<usize, usize> {
-        let res = self.0.compare_and_swap(old, new, order);
-        if res == old { Ok(res) } else { Err(res) }
-    }
-    #[inline]
-    pub fn compare_exchange_weak(
-        &self,
-        old: usize,
-        new: usize,
-        order: Ordering,
-        _: Ordering,
-    ) -> Result<usize, usize> {
-        let res = self.0.compare_and_swap(old, new, order);
-        if res == old { Ok(res) } else { Err(res) }
-    }
-}
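
The deleted stable.rs shim wrapped `AtomicUsize` to offer
`compare_exchange`/`compare_exchange_weak` by lowering both to
`compare_and_swap`, discarding the failure ordering. For reference, the
mapping it implemented, as a standalone example:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Success is detected by the returned previous value; the failure
    // ordering of a real compare_exchange is simply ignored.
    fn cas_compat(a: &AtomicUsize, old: usize, new: usize,
                  order: Ordering) -> Result<usize, usize> {
        let prev = a.compare_and_swap(old, new, order);
        if prev == old { Ok(prev) } else { Err(prev) }
    }

    fn main() {
        let a = AtomicUsize::new(1);
        assert_eq!(cas_compat(&a, 1, 2, Ordering::SeqCst), Ok(1));
        assert_eq!(cas_compat(&a, 1, 3, Ordering::SeqCst), Err(2));
    }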
diff --git a/rustc_deps/vendor/parking_lot_core/.cargo-checksum.json b/rustc_deps/vendor/parking_lot_core/.cargo-checksum.json
index 0dbf94d..6706d64 100644
--- a/rustc_deps/vendor/parking_lot_core/.cargo-checksum.json
+++ b/rustc_deps/vendor/parking_lot_core/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"220144666e4c0a4b3b3235e7d3b10f4f34cb3b8ca292ee19437f23c9a15758de","src/lib.rs":"e80f927665ef24660878e5e4a4ea3c26892c2849889d59aacee6beb59d02020d","src/parking_lot.rs":"2da388ff4c13003fc30531bb6110e4feedac30ad3ce905912e657711a6b0fdad","src/spinwait.rs":"cbd2d2464ef6fa5fb05109bdb3ca588467949dcd4ee9194deafef6004d10215e","src/thread_parker/generic.rs":"0c30db3d1c96bd5ef284a4761a829aba8d21fc813b3d1d70b2baf5f00744e006","src/thread_parker/linux.rs":"1c4c023ebb58fcc16451683c6c8b68311e87ab34537dc17a060ddf5aad02a215","src/thread_parker/unix.rs":"dc6f4af965618cc2d87d3bef6455ba78b44ffe5b38dff9d41fb86e1526cbbcd1","src/thread_parker/windows/keyed_event.rs":"efe64f7bcdfe03049a7b901d2573bc7db1bb73b8ab4a040245423d95c8f9514f","src/thread_parker/windows/mod.rs":"f31eed53f3e402477d80a70a7c6d474c01ba4c9ad952bbe562509448cd3cc1ad","src/thread_parker/windows/waitaddress.rs":"09d1e6a5a6c3f23f375ae4beee946290f7c66d183e69d476ce69b21a4a5aa7af","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84","src/word_lock.rs":"692f443c52672c6e88c0cad259cf7c89dc2a1b54aa95eeeea582401b2a7d058d"},"package":"4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa"}
\ No newline at end of file
+{"files":{"Cargo.toml":"dc95a171db000a9243f985288bafad64342c5a669cfeb63a11776d0858e7c0a1","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","build.rs":"b4ca69501aa539f2d9a50a3188e93d69eb5ad0c38d5cd194df361a3f2f9a8b92","src/lib.rs":"9d5d80460481b44b8d9048b7319be33ac989250960c61b7c4528c0431f9ad36c","src/parking_lot.rs":"f7f597e768def124c78241a419ca2df965f0896b079e195a9d884d89cfbe9f17","src/spinwait.rs":"45a0f2a57177bc9759c0c2167f758ee220fba7cd8bdf1fbcd9c2b962fef9a1e5","src/thread_parker/generic.rs":"985966b1bdc763ada6d1fee800d19dd92611da4f84cfeef2d82763ffd2a499c3","src/thread_parker/linux.rs":"c41d08ab333972b67f20443f474ed25a1f171db0f87e227df4bdc504f6f9d897","src/thread_parker/unix.rs":"6364bcfeacfb3189bcebeddfccef1b9843d3c59d6d0f046827e62b33bcee30dd","src/thread_parker/windows/keyed_event.rs":"83d4bace5d2f17993071de28043e94f3b9673c887e01c72653b6fd24db260634","src/thread_parker/windows/mod.rs":"f31eed53f3e402477d80a70a7c6d474c01ba4c9ad952bbe562509448cd3cc1ad","src/thread_parker/windows/waitaddress.rs":"1a6eee8ab9131c746089e78b0aab900f23a5ed8ee6f2e789b8ec4d2232153ab8","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84","src/word_lock.rs":"c4d50c0d37e6dd92fb078c80f84dc5b5c83bdf46baad34ed35af8730a345a2b5"},"package":"ad7f7e6ebdc79edff6fdcb87a55b620174f7a989e3eb31b65231f4af57f00b8c"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/parking_lot_core/Cargo.toml b/rustc_deps/vendor/parking_lot_core/Cargo.toml
index e5855ac..b84475a 100644
--- a/rustc_deps/vendor/parking_lot_core/Cargo.toml
+++ b/rustc_deps/vendor/parking_lot_core/Cargo.toml
@@ -12,11 +12,11 @@
 
 [package]
 name = "parking_lot_core"
-version = "0.2.14"
+version = "0.3.1"
 authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
 description = "An advanced API for creating custom synchronization primitives."
-documentation = "https://amanieu.github.io/parking_lot/parking_lot_core/index.html"
 keywords = ["mutex", "condvar", "rwlock", "once", "thread"]
+categories = ["concurrency"]
 license = "Apache-2.0/MIT"
 repository = "https://github.com/Amanieu/parking_lot"
 [dependencies.backtrace]
@@ -28,7 +28,7 @@
 optional = true
 
 [dependencies.rand]
-version = "0.4"
+version = "0.5"
 
 [dependencies.smallvec]
 version = "0.6"
@@ -36,6 +36,8 @@
 [dependencies.thread-id]
 version = "3.2.0"
 optional = true
+[build-dependencies.rustc_version]
+version = "0.2"
 
 [features]
 deadlock_detection = ["petgraph", "thread-id", "backtrace"]
diff --git a/rustc_deps/vendor/parking_lot_core/LICENSE b/rustc_deps/vendor/parking_lot_core/LICENSE-APACHE
similarity index 89%
rename from rustc_deps/vendor/parking_lot_core/LICENSE
rename to rustc_deps/vendor/parking_lot_core/LICENSE-APACHE
index 5a35ba9..16fe87b 100644
--- a/rustc_deps/vendor/parking_lot_core/LICENSE
+++ b/rustc_deps/vendor/parking_lot_core/LICENSE-APACHE
@@ -1,5 +1,3 @@
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-APACHE:
-
                               Apache License
                         Version 2.0, January 2004
                      http://www.apache.org/licenses/
@@ -201,31 +199,3 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-========================================
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-MIT:
-
-Copyright (c) 2016 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/parking_lot_core/LICENSE-MIT b/rustc_deps/vendor/parking_lot_core/LICENSE-MIT
new file mode 100644
index 0000000..40b8817
--- /dev/null
+++ b/rustc_deps/vendor/parking_lot_core/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/parking_lot_core/build.rs b/rustc_deps/vendor/parking_lot_core/build.rs
new file mode 100644
index 0000000..8d745ee
--- /dev/null
+++ b/rustc_deps/vendor/parking_lot_core/build.rs
@@ -0,0 +1,8 @@
+extern crate rustc_version;
+use rustc_version::{version, Version};
+
+fn main() {
+    if version().unwrap() >= Version::parse("1.26.0").unwrap() {
+        println!("cargo:rustc-cfg=has_localkey_try_with");
+    }
+}
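
The `has_localkey_try_with` cfg emitted here selects the TLS access path in
parking_lot.rs below: rustc >= 1.26 can use `LocalKey::try_with`, while older
compilers catch the panic from `with`. A sketch of the two shapes the gate
chooses between, mirroring `try_get_tls` in the next file:

    use std::thread::LocalKey;

    #[cfg(has_localkey_try_with)]
    fn tls_get(key: &'static LocalKey<u8>) -> Option<u8> {
        // Stable since Rust 1.26: returns Err instead of panicking once
        // the value has been destroyed during thread teardown.
        key.try_with(|x| *x).ok()
    }

    #[cfg(not(has_localkey_try_with))]
    fn tls_get(key: &'static LocalKey<u8>) -> Option<u8> {
        // Pre-1.26 fallback: catch the panic that `with` raises after
        // TLS destruction.
        std::panic::catch_unwind(|| key.with(|x| *x)).ok()
    }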
diff --git a/rustc_deps/vendor/parking_lot_core/src/lib.rs b/rustc_deps/vendor/parking_lot_core/src/lib.rs
index 4bf6414..338b63e 100644
--- a/rustc_deps/vendor/parking_lot_core/src/lib.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/lib.rs
@@ -38,7 +38,10 @@
 //! reference count and the two mutex bits in the same atomic word.
 
 #![warn(missing_docs)]
-#![cfg_attr(all(feature = "nightly", target_os = "linux"), feature(integer_atomics))]
+#![cfg_attr(
+    all(feature = "nightly", target_os = "linux"),
+    feature(integer_atomics)
+)]
 
 extern crate rand;
 extern crate smallvec;
@@ -69,13 +72,13 @@
 #[path = "thread_parker/generic.rs"]
 mod thread_parker;
 
-mod util;
-mod spinwait;
-mod word_lock;
 mod parking_lot;
+mod spinwait;
+mod util;
+mod word_lock;
 
+pub use parking_lot::deadlock;
+pub use parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue};
 pub use parking_lot::{FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken};
 pub use parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
-pub use parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue};
 pub use spinwait::SpinWait;
-pub use parking_lot::deadlock;
diff --git a/rustc_deps/vendor/parking_lot_core/src/parking_lot.rs b/rustc_deps/vendor/parking_lot_core/src/parking_lot.rs
index ab01fdc..2d525a7 100644
--- a/rustc_deps/vendor/parking_lot_core/src/parking_lot.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/parking_lot.rs
@@ -5,19 +5,20 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
-use std::time::{Duration, Instant};
-use std::cell::{Cell, UnsafeCell};
-use std::ptr;
-use std::mem;
-use std::thread::LocalKey;
-#[cfg(not(feature = "nightly"))]
-use std::panic;
+use rand::rngs::SmallRng;
+use rand::{FromEntropy, Rng};
 use smallvec::SmallVec;
-use rand::{self, Rng, XorShiftRng};
+use std::cell::{Cell, UnsafeCell};
+use std::mem;
+#[cfg(not(has_localkey_try_with))]
+use std::panic;
+use std::ptr;
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::thread::LocalKey;
+use std::time::{Duration, Instant};
 use thread_parker::ThreadParker;
-use word_lock::WordLock;
 use util::UncheckedOptionExt;
+use word_lock::WordLock;
 
 static NUM_THREADS: AtomicUsize = ATOMIC_USIZE_INIT;
 static HASHTABLE: AtomicUsize = ATOMIC_USIZE_INIT;
@@ -91,14 +92,14 @@
     timeout: Instant,
 
     // Random number generator for calculating the next timeout
-    rng: XorShiftRng,
+    rng: SmallRng,
 }
 
 impl FairTimeout {
     fn new() -> FairTimeout {
         FairTimeout {
             timeout: Instant::now(),
-            rng: rand::weak_rng(),
+            rng: SmallRng::from_entropy(),
         }
     }
 
@@ -135,7 +136,8 @@
 
     // Extra data for deadlock detection
     // TODO: once supported in stable replace with #[cfg...] & remove dummy struct/impl
-    #[allow(dead_code)] deadlock_data: deadlock::DeadlockData,
+    #[allow(dead_code)]
+    deadlock_data: deadlock::DeadlockData,
 }
 
 impl ThreadData {
@@ -163,11 +165,11 @@
 unsafe fn get_thread_data(local: &mut Option<ThreadData>) -> &ThreadData {
     // Try to read from thread-local storage, but return None if the TLS has
     // already been destroyed.
-    #[cfg(feature = "nightly")]
+    #[cfg(has_localkey_try_with)]
     fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
         key.try_with(|x| x as *const ThreadData).ok()
     }
-    #[cfg(not(feature = "nightly"))]
+    #[cfg(not(has_localkey_try_with))]
     fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
         panic::catch_unwind(|| key.with(|x| x as *const ThreadData)).ok()
     }
@@ -1068,7 +1070,7 @@
     result
 }
 
-/// [Experimental] Deadlock detection
+/// \[Experimental\] Deadlock detection
 ///
 /// Enabled via the `deadlock_detection` feature flag.
 pub mod deadlock {
@@ -1126,14 +1128,14 @@
 #[cfg(feature = "deadlock_detection")]
 mod deadlock_impl {
     use super::{get_hashtable, get_thread_data, lock_bucket, ThreadData, NUM_THREADS};
-    use std::cell::{Cell, UnsafeCell};
-    use std::sync::mpsc;
-    use std::sync::atomic::Ordering;
-    use std::collections::HashSet;
-    use thread_id;
     use backtrace::Backtrace;
     use petgraph;
     use petgraph::graphmap::DiGraphMap;
+    use std::cell::{Cell, UnsafeCell};
+    use std::collections::HashSet;
+    use std::sync::atomic::Ordering;
+    use std::sync::mpsc;
+    use thread_id;
 
     /// Representation of a deadlocked thread
     pub struct DeadlockedThread {
@@ -1185,8 +1187,7 @@
                 .send(DeadlockedThread {
                     thread_id: td.deadlock_data.thread_id,
                     backtrace: Backtrace::new(),
-                })
-                .unwrap();
+                }).unwrap();
             // make sure to close this sender
             drop(sender);
 
@@ -1362,14 +1363,15 @@
 
     // returns all thread cycles in the wait graph
     fn graph_cycles(g: &DiGraphMap<WaitGraphNode, ()>) -> Vec<Vec<*const ThreadData>> {
-        use petgraph::visit::NodeIndexable;
         use petgraph::visit::depth_first_search;
         use petgraph::visit::DfsEvent;
+        use petgraph::visit::NodeIndexable;
 
         let mut cycles = HashSet::new();
         let mut path = Vec::with_capacity(g.node_bound());
         // start from threads to get the correct threads cycle
-        let threads = g.nodes()
+        let threads = g
+            .nodes()
             .filter(|n| if let &Thread(_) = n { true } else { false });
 
         depth_first_search(g, threads, |e| match e {
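The re-exported `deadlock` module above is the experimental detector gated behind the `deadlock_detection` feature. A hedged usage sketch (the watcher thread and its messages are illustrative; `check_deadlock` is the API as vendored here):

```rust
extern crate parking_lot_core;

use std::thread;
use std::time::Duration;

// Illustrative only: poll the experimental deadlock detector in the
// background and report any wait cycles it finds.
fn spawn_deadlock_watcher() {
    thread::spawn(|| loop {
        thread::sleep(Duration::from_secs(10));
        let deadlocks = parking_lot_core::deadlock::check_deadlock();
        for (i, threads) in deadlocks.iter().enumerate() {
            eprintln!("deadlock #{} involves {} threads", i, threads.len());
        }
    });
}
```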
diff --git a/rustc_deps/vendor/parking_lot_core/src/spinwait.rs b/rustc_deps/vendor/parking_lot_core/src/spinwait.rs
index 38128c5..4185026 100644
--- a/rustc_deps/vendor/parking_lot_core/src/spinwait.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/spinwait.rs
@@ -7,11 +7,11 @@
 
 #[cfg(unix)]
 use libc;
-#[cfg(windows)]
-use winapi;
+use std::sync::atomic::spin_loop_hint;
 #[cfg(not(any(windows, unix)))]
 use std::thread;
-use std::sync::atomic::spin_loop_hint;
+#[cfg(windows)]
+use winapi;
 
 // Yields the rest of the current timeslice to the OS
 #[cfg(windows)]
@@ -90,12 +90,12 @@
     /// to yielding the CPU to the OS after a few iterations.
     #[inline]
     pub fn spin(&mut self) -> bool {
-        if self.counter >= 20 {
+        if self.counter >= 10 {
             return false;
         }
         self.counter += 1;
-        if self.counter <= 10 {
-            cpu_relax(4 << self.counter);
+        if self.counter <= 3 {
+            cpu_relax(1 << self.counter);
         } else {
             thread_yield();
         }
@@ -113,7 +113,7 @@
         if self.counter > 10 {
             self.counter = 10;
         }
-        cpu_relax(4 << self.counter);
+        cpu_relax(1 << self.counter);
     }
 }
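The retuned constants shorten the busy-wait phase: the old code called `cpu_relax(4 << counter)` for the first 10 of 20 attempts, while the new code calls `cpu_relax(1 << counter)` (2, 4, then 8 pause hints) for the first 3 of 10 attempts and yields the timeslice for the rest. A minimal standalone sketch of the new schedule (assumed semantics, not the vendored type):

```rust
use std::sync::atomic::spin_loop_hint;
use std::thread;

// Returns false once the caller should stop spinning and park instead.
fn backoff_step(counter: &mut u32) -> bool {
    if *counter >= 10 {
        return false;
    }
    *counter += 1;
    if *counter <= 3 {
        // Exponentially longer busy-wait: 2, 4, then 8 pause hints.
        for _ in 0..(1u32 << *counter) {
            spin_loop_hint();
        }
    } else {
        // After the brief spin phase, hand the timeslice back to the OS.
        thread::yield_now();
    }
    true
}
```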
 
diff --git a/rustc_deps/vendor/parking_lot_core/src/thread_parker/generic.rs b/rustc_deps/vendor/parking_lot_core/src/thread_parker/generic.rs
index f26d456..d06406a 100644
--- a/rustc_deps/vendor/parking_lot_core/src/thread_parker/generic.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/thread_parker/generic.rs
@@ -5,8 +5,8 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::{Condvar, Mutex, MutexGuard};
 use std::cell::Cell;
+use std::sync::{Condvar, Mutex, MutexGuard};
 use std::time::Instant;
 
 // Helper type for putting a thread to sleep until some other thread wakes it up
diff --git a/rustc_deps/vendor/parking_lot_core/src/thread_parker/linux.rs b/rustc_deps/vendor/parking_lot_core/src/thread_parker/linux.rs
index aee380d..4a13d73 100644
--- a/rustc_deps/vendor/parking_lot_core/src/thread_parker/linux.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/thread_parker/linux.rs
@@ -5,9 +5,9 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use libc;
 use std::sync::atomic::{AtomicI32, Ordering};
 use std::time::Instant;
-use libc;
 
 const FUTEX_WAIT: i32 = 0;
 const FUTEX_WAKE: i32 = 1;
diff --git a/rustc_deps/vendor/parking_lot_core/src/thread_parker/unix.rs b/rustc_deps/vendor/parking_lot_core/src/thread_parker/unix.rs
index a9e7600..169586b 100644
--- a/rustc_deps/vendor/parking_lot_core/src/thread_parker/unix.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/thread_parker/unix.rs
@@ -5,12 +5,21 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::cell::{Cell, UnsafeCell};
-use std::time::{Duration, Instant};
 use libc;
+use std::cell::{Cell, UnsafeCell};
 use std::mem;
 #[cfg(any(target_os = "macos", target_os = "ios"))]
 use std::ptr;
+use std::time::{Duration, Instant};
+
+// x32 Linux uses a non-standard type for tv_nsec in timespec.
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
+#[allow(non_camel_case_types)]
+type tv_nsec_t = i64;
+#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
+#[allow(non_camel_case_types)]
+type tv_nsec_t = libc::c_long;
 
 // Helper type for putting a thread to sleep until some other thread wakes it up
 pub struct ThreadParker {
@@ -31,9 +40,17 @@
     }
 
     // Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
-    #[cfg(any(target_os = "macos", target_os = "ios", target_os = "android"))]
+    #[cfg(any(
+        target_os = "macos",
+        target_os = "ios",
+        target_os = "android"
+    ))]
     unsafe fn init(&self) {}
-    #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))]
+    #[cfg(not(any(
+        target_os = "macos",
+        target_os = "ios",
+        target_os = "android"
+    )))]
     unsafe fn init(&self) {
         let mut attr: libc::pthread_condattr_t = mem::uninitialized();
         let r = libc::pthread_condattr_init(&mut attr);
@@ -184,7 +201,7 @@
     debug_assert_eq!(r, 0);
     libc::timespec {
         tv_sec: now.tv_sec,
-        tv_nsec: now.tv_usec as libc::c_long * 1000,
+        tv_nsec: now.tv_usec as tv_nsec_t * 1000,
     }
 }
 #[cfg(not(any(target_os = "macos", target_os = "ios")))]
@@ -211,7 +228,7 @@
     }
 
     let now = timespec_now();
-    let mut nsec = now.tv_nsec + timeout.subsec_nanos() as libc::c_long;
+    let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t;
     let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t);
     if nsec >= 1_000_000_000 {
         nsec -= 1_000_000_000;
diff --git a/rustc_deps/vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs b/rustc_deps/vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs
index 599d66f..0e32fce 100644
--- a/rustc_deps/vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs
@@ -5,10 +5,10 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use std::mem;
+use std::ptr;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::Instant;
-use std::ptr;
-use std::mem;
 
 use winapi::shared::minwindef::{TRUE, ULONG};
 use winapi::shared::ntdef::NTSTATUS;
diff --git a/rustc_deps/vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs b/rustc_deps/vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs
index 7918d7b..1d9fca8 100644
--- a/rustc_deps/vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs
@@ -5,9 +5,9 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use std::mem;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::Instant;
-use std::mem;
 
 use winapi::shared::basetsd::SIZE_T;
 use winapi::shared::minwindef::{BOOL, DWORD, FALSE, TRUE};
@@ -81,7 +81,8 @@
                 return false;
             }
             let diff = timeout - now;
-            let timeout = diff.as_secs()
+            let timeout = diff
+                .as_secs()
                 .checked_mul(1000)
                 .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000))
                 .map(|ms| {
@@ -90,8 +91,7 @@
                     } else {
                         ms as DWORD
                     }
-                })
-                .unwrap_or(INFINITE);
+                }).unwrap_or(INFINITE);
             let cmp = 1usize;
             let r = (self.WaitOnAddress)(
                 key as *const _ as PVOID,
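The reformatted chain converts the remaining `Duration` into a whole-millisecond timeout for `WaitOnAddress`, rounding the nanosecond part up and saturating to `INFINITE` on overflow. The same computation as a standalone sketch (the helper name is illustrative):

```rust
use std::time::Duration;

const INFINITE: u32 = 0xFFFF_FFFF; // winapi's INFINITE sentinel

// Whole milliseconds, rounded up; INFINITE if the arithmetic overflows.
fn duration_to_timeout_ms(diff: Duration) -> u32 {
    diff.as_secs()
        .checked_mul(1000)
        .and_then(|ms| ms.checked_add((diff.subsec_nanos() as u64 + 999_999) / 1_000_000))
        .map(|ms| if ms > INFINITE as u64 { INFINITE } else { ms as u32 })
        .unwrap_or(INFINITE)
}
```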
diff --git a/rustc_deps/vendor/parking_lot_core/src/word_lock.rs b/rustc_deps/vendor/parking_lot_core/src/word_lock.rs
index 466d38f..7960568 100644
--- a/rustc_deps/vendor/parking_lot_core/src/word_lock.rs
+++ b/rustc_deps/vendor/parking_lot_core/src/word_lock.rs
@@ -5,14 +5,14 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{fence, AtomicUsize, Ordering};
-use std::ptr;
-use std::mem;
-use std::cell::Cell;
-use std::thread::LocalKey;
-#[cfg(not(feature = "nightly"))]
-use std::panic;
 use spinwait::SpinWait;
+use std::cell::Cell;
+use std::mem;
+#[cfg(not(has_localkey_try_with))]
+use std::panic;
+use std::ptr;
+use std::sync::atomic::{fence, AtomicUsize, Ordering};
+use std::thread::LocalKey;
 use thread_parker::ThreadParker;
 
 struct ThreadData {
@@ -50,11 +50,11 @@
 unsafe fn get_thread_data(local: &mut Option<ThreadData>) -> &ThreadData {
     // Try to read from thread-local storage, but return None if the TLS has
     // already been destroyed.
-    #[cfg(feature = "nightly")]
+    #[cfg(has_localkey_try_with)]
     fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
         key.try_with(|x| x as *const ThreadData).ok()
     }
-    #[cfg(not(feature = "nightly"))]
+    #[cfg(not(has_localkey_try_with))]
     fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
         panic::catch_unwind(|| key.with(|x| x as *const ThreadData)).ok()
     }
@@ -93,7 +93,8 @@
 
     #[inline]
     pub unsafe fn lock(&self) {
-        if self.state
+        if self
+            .state
             .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
         {
diff --git a/rustc_deps/vendor/rustc_version/.cargo-checksum.json b/rustc_deps/vendor/rustc_version/.cargo-checksum.json
new file mode 100644
index 0000000..f86fe1c
--- /dev/null
+++ b/rustc_deps/vendor/rustc_version/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"80b9fb136c8c2945b4875b05b0f5a4b11e4722997e751f17d8d3f241d7c684db","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"58bd14a1dfa1d828e6e99f35c3b7c2149d08e2d990d6ca93f92ab8ffb43275b7","src/errors.rs":"b28c2eeb1278fc3e8d68a64b177034faed67f6762335729d3a6d1e61be8fb034","src/lib.rs":"92a32673f77961724bc52b872781f06d22d166f06838c9582c5adae3c5214f51"},"package":"138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/rustc_version/Cargo.toml b/rustc_deps/vendor/rustc_version/Cargo.toml
new file mode 100644
index 0000000..3b252b8
--- /dev/null
+++ b/rustc_deps/vendor/rustc_version/Cargo.toml
@@ -0,0 +1,26 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "rustc_version"
+version = "0.2.3"
+authors = ["Marvin Löbel <loebel.marvin@gmail.com>"]
+description = "A library for querying the version of a installed rustc compiler"
+documentation = "https://docs.rs/rustc_version/"
+readme = "README.md"
+keywords = ["version", "rustc"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/Kimundi/rustc-version-rs"
+[dependencies.semver]
+version = "0.9"
+[badges.travis-ci]
+repository = "Kimundi/rustc-version-rs"
diff --git a/rustc_deps/vendor/parking_lot_core/LICENSE b/rustc_deps/vendor/rustc_version/LICENSE-APACHE
similarity index 88%
copy from rustc_deps/vendor/parking_lot_core/LICENSE
copy to rustc_deps/vendor/rustc_version/LICENSE-APACHE
index 5a35ba9..16fe87b 100644
--- a/rustc_deps/vendor/parking_lot_core/LICENSE
+++ b/rustc_deps/vendor/rustc_version/LICENSE-APACHE
@@ -1,5 +1,3 @@
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-APACHE:
-
                               Apache License
                         Version 2.0, January 2004
                      http://www.apache.org/licenses/
@@ -201,31 +199,3 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-========================================
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-MIT:
-
-Copyright (c) 2016 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/rustc_version/LICENSE-MIT b/rustc_deps/vendor/rustc_version/LICENSE-MIT
new file mode 100644
index 0000000..40b8817
--- /dev/null
+++ b/rustc_deps/vendor/rustc_version/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/rustc_version/README.md b/rustc_deps/vendor/rustc_version/README.md
new file mode 100644
index 0000000..f491ca9
--- /dev/null
+++ b/rustc_deps/vendor/rustc_version/README.md
@@ -0,0 +1,75 @@
+rustc-version-rs
+==============
+
+A library for querying the version of a `rustc` compiler.
+
+This can be used by build scripts or other tools dealing with Rust sources
+to make decisions based on the version of the compiler.
+
+[![Travis-CI Status](https://travis-ci.org/Kimundi/rustc-version-rs.png?branch=master)](https://travis-ci.org/Kimundi/rustc-version-rs)
+
+# Getting Started
+
+[rustc-version-rs is available on crates.io](https://crates.io/crates/rustc_version).
+Look there for the newest released version, as well as for links to the newest builds of the docs.
+
+As of the last update of this README, the latest published version could be used like this:
+
+Add the following dependency to your Cargo manifest...
+
+```toml
+[build-dependencies]
+rustc_version = "0.2"
+```
+
+...and see the [docs](http://kimundi.github.io/rustc-version-rs/rustc_version/index.html) for how to use it.
+
+# Example
+
+```rust
+// This could be a cargo build script
+
+extern crate rustc_version;
+use rustc_version::{version, version_meta, Channel, Version};
+
+fn main() {
+    // Assert we haven't travelled back in time
+    assert!(version().unwrap().major >= 1);
+
+    // Set cfg flags depending on release channel
+    match version_meta().unwrap().channel {
+        Channel::Stable => {
+            println!("cargo:rustc-cfg=RUSTC_IS_STABLE");
+        }
+        Channel::Beta => {
+            println!("cargo:rustc-cfg=RUSTC_IS_BETA");
+        }
+        Channel::Nightly => {
+            println!("cargo:rustc-cfg=RUSTC_IS_NIGHTLY");
+        }
+        Channel::Dev => {
+            println!("cargo:rustc-cfg=RUSTC_IS_DEV");
+        }
+    }
+
+    // Check for a minimum version
+    if version().unwrap() >= Version::parse("1.4.0").unwrap() {
+        println!("cargo:rustc-cfg=compiler_has_important_bugfix");
+    }
+}
+```
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
+additional terms or conditions.
diff --git a/rustc_deps/vendor/rustc_version/src/errors.rs b/rustc_deps/vendor/rustc_version/src/errors.rs
new file mode 100644
index 0000000..54557b6
--- /dev/null
+++ b/rustc_deps/vendor/rustc_version/src/errors.rs
@@ -0,0 +1,79 @@
+use std::{self, error, fmt, io, str};
+use semver::{self, Identifier};
+
+/// The error type for this crate.
+#[derive(Debug)]
+pub enum Error {
+    /// An error occurred when executing the `rustc` command.
+    CouldNotExecuteCommand(io::Error),
+    /// The output of `rustc -vV` was not valid utf-8.
+    Utf8Error(str::Utf8Error),
+    /// The output of `rustc -vV` was not in the expected format.
+    UnexpectedVersionFormat,
+    /// An error occurred in parsing a `VersionReq`.
+    ReqParseError(semver::ReqParseError),
+    /// An error occurred in parsing the semver.
+    SemVerError(semver::SemVerError),
+    /// The pre-release tag is unknown.
+    UnknownPreReleaseTag(Identifier),
+}
+use Error::*;
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use std::error::Error;
+        match *self {
+            CouldNotExecuteCommand(ref e) => write!(f, "{}: {}", self.description(), e),
+            Utf8Error(_) => write!(f, "{}", self.description()),
+            UnexpectedVersionFormat => write!(f, "{}", self.description()),
+            ReqParseError(ref e) => write!(f, "{}: {}", self.description(), e),
+            SemVerError(ref e) => write!(f, "{}: {}", self.description(), e),
+            UnknownPreReleaseTag(ref i) => write!(f, "{}: {}", self.description(), i),
+        }
+    }
+}
+
+impl error::Error for Error {
+    fn cause(&self) -> Option<&error::Error> {
+        match *self {
+            CouldNotExecuteCommand(ref e) => Some(e),
+            Utf8Error(ref e) => Some(e),
+            UnexpectedVersionFormat => None,
+            ReqParseError(ref e) => Some(e),
+            SemVerError(ref e) => Some(e),
+            UnknownPreReleaseTag(_) => None,
+        }
+    }
+
+    fn description(&self) -> &str {
+        match *self {
+            CouldNotExecuteCommand(_) => "could not execute command",
+            Utf8Error(_) => "invalid UTF-8 output from `rustc -vV`",
+            UnexpectedVersionFormat => "unexpected `rustc -vV` format",
+            ReqParseError(_) => "error parsing version requirement",
+            SemVerError(_) => "error parsing version",
+            UnknownPreReleaseTag(_) => "unknown pre-release tag",
+        }
+    }
+}
+
+macro_rules! impl_from {
+    ($($err_ty:ty => $variant:ident),* $(,)*) => {
+        $(
+            impl From<$err_ty> for Error {
+                fn from(e: $err_ty) -> Error {
+                    Error::$variant(e)
+                }
+            }
+        )*
+    }
+}
+
+impl_from! {
+    str::Utf8Error => Utf8Error,
+    semver::SemVerError => SemVerError,
+    semver::ReqParseError => ReqParseError,
+}
+
+/// The result type for this crate.
+pub type Result<T> = std::result::Result<T, Error>;
diff --git a/rustc_deps/vendor/rustc_version/src/lib.rs b/rustc_deps/vendor/rustc_version/src/lib.rs
new file mode 100644
index 0000000..c038288
--- /dev/null
+++ b/rustc_deps/vendor/rustc_version/src/lib.rs
@@ -0,0 +1,347 @@
+// Copyright 2016 rustc-version-rs developers
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![warn(missing_docs)]
+
+//! Simple library for getting the version information of a `rustc`
+//! compiler.
+//!
+//! This can be used by build scripts or other tools dealing with Rust sources
+//! to make decisions based on the version of the compiler.
+//!
+//! It calls `$RUSTC --version -v` and parses the output, falling
+//! back to `rustc` if `$RUSTC` is not set.
+//!
+//! # Example
+//!
+//! ```rust
+//! // This could be a cargo build script
+//!
+//! extern crate rustc_version;
+//! use rustc_version::{version, version_meta, Channel, Version};
+//!
+//! fn main() {
+//!     // Assert we haven't travelled back in time
+//!     assert!(version().unwrap().major >= 1);
+//!
+//!     // Set cfg flags depending on release channel
+//!     match version_meta().unwrap().channel {
+//!         Channel::Stable => {
+//!             println!("cargo:rustc-cfg=RUSTC_IS_STABLE");
+//!         }
+//!         Channel::Beta => {
+//!             println!("cargo:rustc-cfg=RUSTC_IS_BETA");
+//!         }
+//!         Channel::Nightly => {
+//!             println!("cargo:rustc-cfg=RUSTC_IS_NIGHTLY");
+//!         }
+//!         Channel::Dev => {
+//!             println!("cargo:rustc-cfg=RUSTC_IS_DEV");
+//!         }
+//!     }
+//!
+//!     // Check for a minimum version
+//!     if version().unwrap() >= Version::parse("1.4.0").unwrap() {
+//!         println!("cargo:rustc-cfg=compiler_has_important_bugfix");
+//!     }
+//! }
+//! ```
+
+extern crate semver;
+use semver::Identifier;
+use std::process::Command;
+use std::{env, str};
+use std::ffi::OsString;
+
+// Convenience re-export to allow version comparison without needing to add
+// semver crate.
+pub use semver::Version;
+
+mod errors;
+pub use errors::{Error, Result};
+
+/// Release channel of the compiler.
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
+pub enum Channel {
+    /// Development release channel
+    Dev,
+    /// Nightly release channel
+    Nightly,
+    /// Beta release channel
+    Beta,
+    /// Stable release channel
+    Stable,
+}
+
+/// Rustc version plus metadata like git short hash and build date.
+#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct VersionMeta {
+    /// Version of the compiler
+    pub semver: Version,
+
+    /// Git short hash of the build of the compiler
+    pub commit_hash: Option<String>,
+
+    /// Commit date of the compiler
+    pub commit_date: Option<String>,
+
+    /// Build date of the compiler; this was removed between Rust 1.0.0 and 1.1.0.
+    pub build_date: Option<String>,
+
+    /// Release channel of the compiler
+    pub channel: Channel,
+
+    /// Host target triple of the compiler
+    pub host: String,
+
+    /// Short version string of the compiler
+    pub short_version_string: String,
+}
+
+impl VersionMeta {
+    /// Returns the version metadata for `cmd`, which should be a `rustc` command.
+    pub fn for_command(cmd: Command) -> Result<VersionMeta> {
+        let mut cmd = cmd;
+
+        let out = cmd.arg("-vV").output().map_err(Error::CouldNotExecuteCommand)?;
+        let out = str::from_utf8(&out.stdout)?;
+
+        version_meta_for(out)
+    }
+}
+
+/// Returns the `rustc` SemVer version.
+pub fn version() -> Result<Version> {
+    Ok(version_meta()?.semver)
+}
+
+/// Returns the `rustc` SemVer version and additional metadata
+/// like the git short hash and build date.
+pub fn version_meta() -> Result<VersionMeta> {
+    let cmd = env::var_os("RUSTC").unwrap_or_else(|| OsString::from("rustc"));
+
+    VersionMeta::for_command(Command::new(cmd))
+}
+
+/// Parses a "rustc -vV" output string and returns
+/// the SemVer version and additional metadata
+/// like the git short hash and build date.
+pub fn version_meta_for(verbose_version_string: &str) -> Result<VersionMeta> {
+    let out: Vec<_> = verbose_version_string.lines().collect();
+
+    if !(out.len() >= 6 && out.len() <= 8) {
+        return Err(Error::UnexpectedVersionFormat);
+    }
+
+    let short_version_string = out[0];
+
+    fn expect_prefix<'a>(line: &'a str, prefix: &str) -> Result<&'a str> {
+        if line.starts_with(prefix) {
+            Ok(&line[prefix.len()..])
+        } else {
+            Err(Error::UnexpectedVersionFormat)
+        }
+    }
+
+    let commit_hash = match expect_prefix(out[2], "commit-hash: ")? {
+        "unknown" => None,
+        hash => Some(hash.to_owned()),
+    };
+
+    let commit_date = match expect_prefix(out[3], "commit-date: ")? {
+        "unknown" => None,
+        hash => Some(hash.to_owned()),
+    };
+
+    // Handle that the build date may or may not be present.
+    let mut idx = 4;
+    let mut build_date = None;
+    if out[idx].starts_with("build-date") {
+        build_date = match expect_prefix(out[idx], "build-date: ")? {
+            "unknown" => None,
+            s => Some(s.to_owned()),
+        };
+        idx += 1;
+    }
+
+    let host = expect_prefix(out[idx], "host: ")?;
+    idx += 1;
+    let release = expect_prefix(out[idx], "release: ")?;
+
+    let semver: Version = release.parse()?;
+
+    let channel = if semver.pre.is_empty() {
+        Channel::Stable
+    } else {
+        match semver.pre[0] {
+            Identifier::AlphaNumeric(ref s) if s == "dev" => Channel::Dev,
+            Identifier::AlphaNumeric(ref s) if s == "beta" => Channel::Beta,
+            Identifier::AlphaNumeric(ref s) if s == "nightly" => Channel::Nightly,
+            ref x => return Err(Error::UnknownPreReleaseTag(x.clone())),
+        }
+    };
+
+    Ok(VersionMeta {
+        semver: semver,
+        commit_hash: commit_hash,
+        commit_date: commit_date,
+        build_date: build_date,
+        channel: channel,
+        host: host.into(),
+        short_version_string: short_version_string.into(),
+    })
+}
+
+#[test]
+fn smoketest() {
+    let v = version().unwrap();
+    assert!(v.major >= 1);
+
+    let v = version_meta().unwrap();
+    assert!(v.semver.major >= 1);
+
+    assert!(version().unwrap() >= Version::parse("1.0.0").unwrap());
+}
+
+#[test]
+fn parse_unexpected() {
+    let res = version_meta_for(
+"rustc 1.0.0 (a59de37e9 2015-05-13) (built 2015-05-14)
+binary: rustc
+commit-hash: a59de37e99060162a2674e3ff45409ac73595c0e
+commit-date: 2015-05-13
+rust-birthday: 2015-05-14
+host: x86_64-unknown-linux-gnu
+release: 1.0.0");
+
+    assert!(match res {
+        Err(Error::UnexpectedVersionFormat) => true,
+        _ => false,
+    });
+
+}
+#[test]
+fn parse_1_0_0() {
+    let version = version_meta_for(
+"rustc 1.0.0 (a59de37e9 2015-05-13) (built 2015-05-14)
+binary: rustc
+commit-hash: a59de37e99060162a2674e3ff45409ac73595c0e
+commit-date: 2015-05-13
+build-date: 2015-05-14
+host: x86_64-unknown-linux-gnu
+release: 1.0.0").unwrap();
+
+    assert_eq!(version.semver, Version::parse("1.0.0").unwrap());
+    assert_eq!(version.commit_hash, Some("a59de37e99060162a2674e3ff45409ac73595c0e".into()));
+    assert_eq!(version.commit_date, Some("2015-05-13".into()));
+    assert_eq!(version.build_date, Some("2015-05-14".into()));
+    assert_eq!(version.channel, Channel::Stable);
+    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
+    assert_eq!(version.short_version_string, "rustc 1.0.0 (a59de37e9 2015-05-13) (built 2015-05-14)");
+}
+
+
+#[test]
+fn parse_unknown() {
+    let version = version_meta_for(
+"rustc 1.3.0
+binary: rustc
+commit-hash: unknown
+commit-date: unknown
+host: x86_64-unknown-linux-gnu
+release: 1.3.0").unwrap();
+
+    assert_eq!(version.semver, Version::parse("1.3.0").unwrap());
+    assert_eq!(version.commit_hash, None);
+    assert_eq!(version.commit_date, None);
+    assert_eq!(version.channel, Channel::Stable);
+    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
+    assert_eq!(version.short_version_string, "rustc 1.3.0");
+}
+
+#[test]
+fn parse_nightly() {
+    let version = version_meta_for(
+"rustc 1.5.0-nightly (65d5c0833 2015-09-29)
+binary: rustc
+commit-hash: 65d5c083377645a115c4ac23a620d3581b9562b6
+commit-date: 2015-09-29
+host: x86_64-unknown-linux-gnu
+release: 1.5.0-nightly").unwrap();
+
+    assert_eq!(version.semver, Version::parse("1.5.0-nightly").unwrap());
+    assert_eq!(version.commit_hash, Some("65d5c083377645a115c4ac23a620d3581b9562b6".into()));
+    assert_eq!(version.commit_date, Some("2015-09-29".into()));
+    assert_eq!(version.channel, Channel::Nightly);
+    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
+    assert_eq!(version.short_version_string, "rustc 1.5.0-nightly (65d5c0833 2015-09-29)");
+}
+
+#[test]
+fn parse_stable() {
+    let version = version_meta_for(
+"rustc 1.3.0 (9a92aaf19 2015-09-15)
+binary: rustc
+commit-hash: 9a92aaf19a64603b02b4130fe52958cc12488900
+commit-date: 2015-09-15
+host: x86_64-unknown-linux-gnu
+release: 1.3.0").unwrap();
+
+    assert_eq!(version.semver, Version::parse("1.3.0").unwrap());
+    assert_eq!(version.commit_hash, Some("9a92aaf19a64603b02b4130fe52958cc12488900".into()));
+    assert_eq!(version.commit_date, Some("2015-09-15".into()));
+    assert_eq!(version.channel, Channel::Stable);
+    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
+    assert_eq!(version.short_version_string, "rustc 1.3.0 (9a92aaf19 2015-09-15)");
+}
+
+#[test]
+fn parse_1_16_0_nightly() {
+    let version = version_meta_for(
+"rustc 1.16.0-nightly (5d994d8b7 2017-01-05)
+binary: rustc
+commit-hash: 5d994d8b7e482e87467d4a521911477bd8284ce3
+commit-date: 2017-01-05
+host: x86_64-unknown-linux-gnu
+release: 1.16.0-nightly
+LLVM version: 3.9").unwrap();
+
+    assert_eq!(version.semver, Version::parse("1.16.0-nightly").unwrap());
+    assert_eq!(version.commit_hash, Some("5d994d8b7e482e87467d4a521911477bd8284ce3".into()));
+    assert_eq!(version.commit_date, Some("2017-01-05".into()));
+    assert_eq!(version.channel, Channel::Nightly);
+    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
+    assert_eq!(version.short_version_string, "rustc 1.16.0-nightly (5d994d8b7 2017-01-05)");
+}
+
+/*
+#[test]
+fn version_matches_replacement() {
+    let f = |s1: &str, s2: &str| {
+        let a = Version::parse(s1).unwrap();
+        let b = Version::parse(s2).unwrap();
+        println!("{} <= {} : {}", s1, s2, a <= b);
+    };
+
+    println!();
+
+    f("1.5.0",         "1.5.0");
+    f("1.5.0-nightly", "1.5.0");
+    f("1.5.0",         "1.5.0-nightly");
+    f("1.5.0-nightly", "1.5.0-nightly");
+
+    f("1.5.0",         "1.6.0");
+    f("1.5.0-nightly", "1.6.0");
+    f("1.5.0",         "1.6.0-nightly");
+    f("1.5.0-nightly", "1.6.0-nightly");
+
+    panic!();
+
+}
+*/
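Beyond the crate-level example, `VersionMeta::for_command` lets a build script probe an explicit toolchain rather than `$RUSTC`. A hedged usage sketch (the binary name and output format are illustrative):

```rust
extern crate rustc_version;

use rustc_version::VersionMeta;
use std::process::Command;

fn main() {
    // Query a specific rustc binary instead of $RUSTC / `rustc`.
    let meta = VersionMeta::for_command(Command::new("rustc")).unwrap();
    println!(
        "cargo:warning=building with rustc {} ({:?}) for {}",
        meta.semver, meta.channel, meta.host
    );
}
```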
diff --git a/rustc_deps/vendor/scopeguard/.cargo-checksum.json b/rustc_deps/vendor/scopeguard/.cargo-checksum.json
new file mode 100644
index 0000000..8657b29
--- /dev/null
+++ b/rustc_deps/vendor/scopeguard/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"015263d52db02d239b69b9e3f090c2902056a80b8625e37a83bf4af79def93f7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.rst":"e53c8b52d8c641b866ebc169fed1d2b154af70296ddf5e087e454de03a7e6040","examples/readme.rs":"5a01391acf2acc52a7a2e0ba58dc8ded3e8cc57d54b45778af5e8ba577158f86","src/lib.rs":"230fd73e62f7d43cd65931c16ed0083d2d4bfcd899abcf5215728732e0e601df"},"package":"94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/scopeguard/Cargo.toml b/rustc_deps/vendor/scopeguard/Cargo.toml
new file mode 100644
index 0000000..4a0d52b
--- /dev/null
+++ b/rustc_deps/vendor/scopeguard/Cargo.toml
@@ -0,0 +1,28 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "scopeguard"
+version = "0.3.3"
+authors = ["bluss"]
+description = "A RAII scope guard that will run a given closure when it goes out of scope,\neven if the code between panics (assuming unwinding panic).\n\nDefines the macros `defer!` and `defer_on_unwind!`; the latter only runs\nif the scope is extited through unwinding on panic.\n"
+documentation = "https://docs.rs/scopeguard/"
+keywords = ["scope-guard", "defer", "panic"]
+categories = ["rust-patterns"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/bluss/scopeguard"
+[package.metadata.release]
+no-dev-version = true
+
+[features]
+default = ["use_std"]
+use_std = []
diff --git a/rustc_deps/vendor/parking_lot_core/LICENSE b/rustc_deps/vendor/scopeguard/LICENSE-APACHE
similarity index 88%
copy from rustc_deps/vendor/parking_lot_core/LICENSE
copy to rustc_deps/vendor/scopeguard/LICENSE-APACHE
index 5a35ba9..16fe87b 100644
--- a/rustc_deps/vendor/parking_lot_core/LICENSE
+++ b/rustc_deps/vendor/scopeguard/LICENSE-APACHE
@@ -1,5 +1,3 @@
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-APACHE:
-
                               Apache License
                         Version 2.0, January 2004
                      http://www.apache.org/licenses/
@@ -201,31 +199,3 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-========================================
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-MIT:
-
-Copyright (c) 2016 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/scopeguard/LICENSE-MIT b/rustc_deps/vendor/scopeguard/LICENSE-MIT
new file mode 100644
index 0000000..e69282e
--- /dev/null
+++ b/rustc_deps/vendor/scopeguard/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2015 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/scopeguard/README.rst b/rustc_deps/vendor/scopeguard/README.rst
new file mode 100644
index 0000000..d4699ea
--- /dev/null
+++ b/rustc_deps/vendor/scopeguard/README.rst
@@ -0,0 +1,81 @@
+
+scopeguard
+==========
+
+Rust crate for a convenient RAII scope guard that will run a given closure when
+it goes out of scope, even if the code between panics (assuming unwinding panic).
+
+The `defer!` macro and `guard` are `no_std` compatible (require only core),
+but the on-unwind strategy requires linking to `std`.
+
+Requires Rust 1.11.
+
+
+Please read the `API documentation here`__
+
+__ https://docs.rs/scopeguard/
+
+|build_status|_ |crates|_
+
+.. |build_status| image:: https://travis-ci.org/bluss/scopeguard.svg
+.. _build_status: https://travis-ci.org/bluss/scopeguard
+
+.. |crates| image:: http://meritbadge.herokuapp.com/scopeguard
+.. _crates: https://crates.io/crates/scopeguard
+
+How to use
+----------
+
+.. code:: rust
+
+    #[macro_use(defer)] extern crate scopeguard;
+
+    use scopeguard::guard;
+
+    fn f() {
+        defer!(println!("Called at return or panic"));
+        panic!();
+    }
+
+    use std::fs::File;
+    use std::io::Write;
+
+    fn g() {
+        let f = File::create("newfile.txt").unwrap();
+        let mut file = guard(f, |f| {
+            // write file at return or panic
+            let _ = f.sync_all();
+        });
+        // Access the file through the scope guard itself
+        file.write(b"test me\n").unwrap();
+    }
+
+Recent Changes
+--------------
+
+- 0.3.3
+
+  - Use ``#[inline]`` on a few more functions by @stjepang (#14)
+  - Add examples to crate documentation
+
+- 0.3.2
+
+  - Add crate categories
+
+- 0.3.1
+
+  - Add ``defer_on_unwind!``, ``Strategy`` trait
+  - Rename ``Guard`` → ``ScopeGuard``
+  - Add ``ScopeGuard::with_strategy``.
+  - ``ScopeGuard`` now implements ``Debug``.
+  - Require Rust 1.11
+
+- 0.2.0
+
+  - Require Rust 1.6
+  - Use `no_std` unconditionally
+  - No other changes
+
+- 0.1.2
+
+  - Add macro ``defer!()``
diff --git a/rustc_deps/vendor/scopeguard/examples/readme.rs b/rustc_deps/vendor/scopeguard/examples/readme.rs
new file mode 100644
index 0000000..904bc54
--- /dev/null
+++ b/rustc_deps/vendor/scopeguard/examples/readme.rs
@@ -0,0 +1,27 @@
+
+#[macro_use(defer)] extern crate scopeguard;
+
+use scopeguard::guard;
+
+fn f() {
+    defer!(println!("Called at return or panic"));
+    panic!();
+}
+
+use std::fs::File;
+use std::io::Write;
+
+fn g() {
+    let f = File::create("newfile.txt").unwrap();
+    let mut file = guard(f, |f| {
+        // write file at return or panic
+        let _ = f.sync_all();
+    });
+    // Access the file through the scope guard itself
+    file.write(b"test me\n").unwrap();
+}
+
+fn main() {
+    f();
+    g();
+}
diff --git a/rustc_deps/vendor/scopeguard/src/lib.rs b/rustc_deps/vendor/scopeguard/src/lib.rs
new file mode 100644
index 0000000..ff3929a
--- /dev/null
+++ b/rustc_deps/vendor/scopeguard/src/lib.rs
@@ -0,0 +1,409 @@
+#![cfg_attr(not(any(test, feature = "use_std")), no_std)]
+
+//! A scope guard will run a given closure when it goes out of scope,
+//! even if the code between panics.
+//! (as long as panic doesn't abort)
+//!
+//! # Examples
+//!
+//! ## `defer!`
+//!
+//! Use the `defer` macro to run an operation at scope exit,
+//! either regular scope exit or during unwinding from a panic.
+//!
+//! ```
+//! #[macro_use(defer)] extern crate scopeguard;
+//!
+//! use std::cell::Cell;
+//!
+//! fn main() {
+//!     // use a cell to observe drops during and after the scope guard is active
+//!     let drop_counter = Cell::new(0);
+//!     {
+//!         // Create a scope guard using `defer!` for the current scope
+//!         defer! {{
+//!             drop_counter.set(1 + drop_counter.get());
+//!         }};
+//!
+//!         // Do regular operations here in the meantime.
+//!
+//!         // Just before scope exit: it hasn't run yet.
+//!         assert_eq!(drop_counter.get(), 0);
+//!
+//!         // The following scope end is where the defer closure is called
+//!     }
+//!     assert_eq!(drop_counter.get(), 1);
+//! }
+//! ```
+//!
+//! ## Scope Guard with Value
+//!
+//! If the scope guard closure needs to access an outer value that is also
+//! mutated outside of the scope guard, then you may want to use the scope guard
+//! with a value. The guard works like a smart pointer, so the inner value can
+//! be accessed by reference or by mutable reference.
+//!
+//! ### 1. The guard owns a file
+//!
+//! In this example, the scope guard owns a file and ensures pending writes are
+//! synced at scope exit.
+//!
+//! ```
+//! extern crate scopeguard;
+//! 
+//! use std::fs::File;
+//! use std::io::{self, Write};
+//! 
+//! fn try_main() -> io::Result<()> {
+//!     let f = File::create("newfile.txt")?;
+//!     let mut file = scopeguard::guard(f, |f| {
+//!         // ensure we flush file at return or panic
+//!         let _ = f.sync_all();
+//!     });
+//!     // Access the file through the scope guard itself
+//!     file.write(b"test me\n").map(|_| ())
+//! }
+//!
+//! fn main() {
+//!     try_main().unwrap();
+//! }
+//!
+//! ```
+//!
+//! ### 2. The guard restores an invariant on scope exit
+//!
+//! ```
+//! extern crate scopeguard;
+//!
+//! use std::mem::ManuallyDrop;
+//! use std::ptr;
+//!
+//! // This function, just for this example, takes the first element
+//! // and inserts it into the assumed sorted tail of the vector.
+//! //
+//! // For optimization purposes we temporarily violate an invariant of the
+//! // Vec, that it owns all of its elements.
+//! // 
+//! // The safe approach is to use swap, which means two writes to memory,
+//! // the optimization is to use a “hole” which uses only one write of memory
+//! // for each position it moves.
+//! //
+//! // We *must* use a scope guard to run this code safely. We
+//! // are running arbitrary user code (comparison operators) that may panic.
+//! // The scope guard ensures we restore the invariant after successful
+//! // exit or during unwinding from panic.
+//! fn insertion_sort_first<T>(v: &mut Vec<T>)
+//!     where T: PartialOrd
+//! {
+//!     struct Hole<'a, T: 'a> {
+//!         v: &'a mut Vec<T>,
+//!         index: usize,
+//!         value: ManuallyDrop<T>,
+//!     }
+//!
+//!     unsafe {
+//!         // Create a moved-from location in the vector, a “hole”.
+//!         let value = ptr::read(&v[0]);
+//!         let mut hole = Hole { v: v, index: 0, value: ManuallyDrop::new(value) };
+//!
+//!         // Use a scope guard with a value.
+//!         // At scope exit, plug the hole so that the vector is fully
+//!         // initialized again.
+//!         // The scope guard owns the hole, but we can access it through the guard.
+//!         let mut hole_guard = scopeguard::guard(hole, |hole| {
+//!             // plug the hole in the vector with the value that was taken out
+//!             let index = hole.index;
+//!             ptr::copy_nonoverlapping(&*hole.value, &mut hole.v[index], 1);
+//!         });
+//!
+//!         // run algorithm that moves the hole in the vector here
+//!         // move the hole until it's in a sorted position
+//!         for i in 1..hole_guard.v.len() {
+//!             if *hole_guard.value >= hole_guard.v[i] {
+//!                 // move the element back and the hole forward
+//!                 let index = hole_guard.index;
+//!                 ptr::copy_nonoverlapping(&hole_guard.v[index + 1], &mut hole_guard.v[index], 1);
+//!                 hole_guard.index += 1;
+//!             } else {
+//!                 break;
+//!             }
+//!         }
+//!
+//!         // When the scope exits here, the Vec becomes whole again!
+//!     }
+//! }
+//!
+//! fn main() {
+//!     let string = String::from;
+//!     let mut data = vec![string("c"), string("a"), string("b"), string("d")];
+//!     insertion_sort_first(&mut data);
+//!     assert_eq!(data, vec!["a", "b", "c", "d"]);
+//! }
+//!
+//! ```
+//!
+//!
+//! # Crate features:
+//!
+//! - `use_std`
+//!   + Enabled by default. Enables the `OnUnwind` strategy.
+//!   + Disable to use `no_std`.
+
+#[cfg(not(any(test, feature = "use_std")))]
+extern crate core as std;
+
+use std::fmt;
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut};
+
+pub trait Strategy {
+    /// Return `true` if the guard’s associated code should run
+    /// (in the context where this method is called).
+    fn should_run() -> bool;
+}
+
+/// Always run on scope exit.
+///
+/// “Always” run: on regular exit from a scope or on unwinding from a panic.
+/// Cannot run on abort, process exit, and other catastrophic events where
+/// destructors don’t run.
+#[derive(Debug)]
+pub enum Always {}
+
+/// Run on scope exit through unwinding.
+///
+/// Requires crate feature `use_std`.
+#[cfg(feature = "use_std")]
+#[derive(Debug)]
+pub enum OnUnwind {}
+
+/// Run on regular scope exit, when not unwinding.
+///
+/// Requires crate feature `use_std`.
+#[cfg(feature = "use_std")]
+#[derive(Debug)]
+#[cfg(test)]
+enum OnSuccess {}
+
+impl Strategy for Always {
+    #[inline(always)]
+    fn should_run() -> bool { true }
+}
+
+#[cfg(feature = "use_std")]
+impl Strategy for OnUnwind {
+    #[inline(always)]
+    fn should_run() -> bool { std::thread::panicking() }
+}
+
+#[cfg(feature = "use_std")]
+#[cfg(test)]
+impl Strategy for OnSuccess {
+    #[inline(always)]
+    fn should_run() -> bool { !std::thread::panicking() }
+}
+
+/// Macro to create a `ScopeGuard` (always run).
+///
+/// The macro takes one expression `$e`, which is the body of a closure
+/// that will run when the scope is exited. The expression can
+/// be a whole block.
+#[macro_export]
+macro_rules! defer {
+    ($e:expr) => {
+        let _guard = $crate::guard((), |_| $e);
+    }
+}
+
+/// Macro to create a `ScopeGuard` (run on successful scope exit).
+///
+/// The macro takes one expression `$e`, which is the body of a closure
+/// that will run when the scope is exited. The expression can
+/// be a whole block.
+///
+/// Requires crate feature `use_std`.
+#[cfg(test)]
+macro_rules! defer_on_success {
+    ($e:expr) => {
+        let _guard = $crate::guard_on_success((), |_| $e);
+    }
+}
+
+/// Macro to create a `ScopeGuard` (run on unwinding from panic).
+///
+/// The macro takes one expression `$e`, which is the body of a closure
+/// that will run when the scope is exited. The expression can
+/// be a whole block.
+///
+/// Requires crate feature `use_std`.
+#[macro_export]
+macro_rules! defer_on_unwind {
+    ($e:expr) => {
+        let _guard = $crate::guard_on_unwind((), |_| $e);
+    }
+}
+
+/// `ScopeGuard` is a scope guard that may own a protected value.
+///
+/// If you place a guard in a local variable, the closure can
+/// run regardless of how you leave the scope — through regular return or panic
+/// (except if a panic aborts instead of unwinding; that is, as long as destructors run).
+/// It is run only once.
+///
+/// The `S` parameter for [`Strategy`](Strategy.t.html) determines if
+/// the closure actually runs.
+///
+/// The guard's closure will be called with a mut ref to the held value
+/// in the destructor. It's called only once.
+///
+/// The `ScopeGuard` implements `Deref` so that you can access the inner value.
+pub struct ScopeGuard<T, F, S: Strategy = Always>
+    where F: FnMut(&mut T)
+{
+    __dropfn: F,
+    __value: T,
+    strategy: PhantomData<S>,
+}
+impl<T, F, S> ScopeGuard<T, F, S>
+    where F: FnMut(&mut T),
+          S: Strategy,
+{
+    /// Create a `ScopeGuard` that owns `v` (accessible through deref) and calls
+    /// `dropfn` when its destructor runs.
+    ///
+    /// The `Strategy` decides whether the scope guard's closure should run.
+    #[inline]
+    pub fn with_strategy(v: T, dropfn: F) -> ScopeGuard<T, F, S> {
+        ScopeGuard {
+            __value: v,
+            __dropfn: dropfn,
+            strategy: PhantomData,
+        }
+    }
+}
+
+
+/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
+#[inline]
+pub fn guard<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, Always>
+    where F: FnMut(&mut T)
+{
+    ScopeGuard::with_strategy(v, dropfn)
+}
+
+/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
+///
+/// Requires crate feature `use_std`.
+#[cfg(feature = "use_std")]
+#[cfg(test)]
+#[inline]
+fn guard_on_success<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnSuccess>
+    where F: FnMut(&mut T)
+{
+    ScopeGuard::with_strategy(v, dropfn)
+}
+
+/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
+///
+/// Requires crate feature `use_std`.
+#[cfg(feature = "use_std")]
+#[inline]
+pub fn guard_on_unwind<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnUnwind>
+    where F: FnMut(&mut T)
+{
+    ScopeGuard::with_strategy(v, dropfn)
+}
+
+impl<T, F, S: Strategy> Deref for ScopeGuard<T, F, S>
+    where F: FnMut(&mut T)
+{
+    type Target = T;
+    fn deref(&self) -> &T {
+        &self.__value
+    }
+
+}
+
+impl<T, F, S: Strategy> DerefMut for ScopeGuard<T, F, S>
+    where F: FnMut(&mut T)
+{
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.__value
+    }
+}
+
+impl<T, F, S: Strategy> Drop for ScopeGuard<T, F, S>
+    where F: FnMut(&mut T)
+{
+    fn drop(&mut self) {
+        if S::should_run() {
+            (self.__dropfn)(&mut self.__value)
+        }
+    }
+}
+
+impl<T, F, S> fmt::Debug for ScopeGuard<T, F, S>
+    where T: fmt::Debug,
+          F: FnMut(&mut T),
+          S: Strategy + fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("ScopeGuard")
+         .field("value", &self.__value)
+         .finish()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::cell::Cell;
+    use std::panic::catch_unwind;
+    use std::panic::AssertUnwindSafe;
+
+    #[test]
+    fn test_defer() {
+        let drops = Cell::new(0);
+        defer!(drops.set(1000));
+        assert_eq!(drops.get(), 0);
+    }
+
+    #[test]
+    fn test_defer_success_1() {
+        let drops = Cell::new(0);
+        {
+            defer_on_success!(drops.set(1));
+            assert_eq!(drops.get(), 0);
+        }
+        assert_eq!(drops.get(), 1);
+    }
+
+    #[test]
+    fn test_defer_success_2() {
+        let drops = Cell::new(0);
+        let _ = catch_unwind(AssertUnwindSafe(|| {
+            defer_on_success!(drops.set(1));
+            panic!("failure")
+        }));
+        assert_eq!(drops.get(), 0);
+    }
+
+    #[test]
+    fn test_defer_unwind_1() {
+        let drops = Cell::new(0);
+        let _ = catch_unwind(AssertUnwindSafe(|| {
+            defer_on_unwind!(drops.set(1));
+            assert_eq!(drops.get(), 0);
+            panic!("failure")
+        }));
+        assert_eq!(drops.get(), 1);
+    }
+
+    #[test]
+    fn test_defer_unwind_2() {
+        let drops = Cell::new(0);
+        {
+            defer_on_unwind!(drops.set(1));
+        }
+        assert_eq!(drops.get(), 0);
+    }
+}
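The crate docs above exercise `defer!` and `guard`; the unwind-only variant behaves the same but fires only when a panic unwinds through the scope. A hedged sketch (the rollback message and function are illustrative):

```rust
#[macro_use(defer_on_unwind)]
extern crate scopeguard;

fn apply_update(log: &mut Vec<&'static str>) {
    // Runs only if a panic unwinds through this scope (requires `use_std`).
    defer_on_unwind!(eprintln!("update panicked; state may need rollback"));

    log.push("begin");
    // ... fallible work here; on panic the message above is printed ...
    log.push("commit");
}

fn main() {
    let mut log = Vec::new();
    apply_update(&mut log);
    assert_eq!(log, ["begin", "commit"]);
}
```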
diff --git a/rustc_deps/vendor/semver-parser/.cargo-checksum.json b/rustc_deps/vendor/semver-parser/.cargo-checksum.json
new file mode 100644
index 0000000..73575fe
--- /dev/null
+++ b/rustc_deps/vendor/semver-parser/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"67597114802114d2a7fdb457c1cf5f7e0c951b21e287c6a47b9a86b9028cf64d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"d38feaa4f9468cd1e0ece22e0ad2eadfe6195a9a0a3843b7c722d5c7d81804fb","src/common.rs":"dc42336abd34e19ca9f732f33657e106f98dcc8c10d4c2564bc4f160cb31926e","src/lib.rs":"3ac8ef5a280344a25cb18ac386034c0fee8d64060fa14af5e25ed49f0cb2fd9e","src/range.rs":"3596f048d466d43887aff1e8c8c834476672a4627631ed35379c35466b5f02ec","src/recognize.rs":"9f16eda9fcd7d8af7eee4c3b89c611bd648040273fde6b35778f8a50b004c8b1","src/version.rs":"dbd91a4e4fd92a0aa9eb4f858ecbc1ecd680aa60572cc2ad2085e5c5c30e5b77"},"package":"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/semver-parser/Cargo.toml b/rustc_deps/vendor/semver-parser/Cargo.toml
new file mode 100644
index 0000000..c2be878
--- /dev/null
+++ b/rustc_deps/vendor/semver-parser/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "semver-parser"
+version = "0.7.0"
+authors = ["Steve Klabnik <steve@steveklabnik.com>"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/steveklabnik/semver-parser"
+homepage = "https://github.com/steveklabnik/semver-parser"
+documentation = "https://docs.rs/semver-parser"
+description = """
+Parsing of the semver spec.
+"""
diff --git a/rustc_deps/vendor/parking_lot_core/LICENSE b/rustc_deps/vendor/semver-parser/LICENSE-APACHE
similarity index 88%
copy from rustc_deps/vendor/parking_lot_core/LICENSE
copy to rustc_deps/vendor/semver-parser/LICENSE-APACHE
index 5a35ba9..16fe87b 100644
--- a/rustc_deps/vendor/parking_lot_core/LICENSE
+++ b/rustc_deps/vendor/semver-parser/LICENSE-APACHE
@@ -1,5 +1,3 @@
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-APACHE:
-
                               Apache License
                         Version 2.0, January 2004
                      http://www.apache.org/licenses/
@@ -201,31 +199,3 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-========================================
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-MIT:
-
-Copyright (c) 2016 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/semver-parser/LICENSE-MIT b/rustc_deps/vendor/semver-parser/LICENSE-MIT
new file mode 100644
index 0000000..fb7494a
--- /dev/null
+++ b/rustc_deps/vendor/semver-parser/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 Steve Klabnik
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/semver-parser/src/common.rs b/rustc_deps/vendor/semver-parser/src/common.rs
new file mode 100644
index 0000000..267b4d9
--- /dev/null
+++ b/rustc_deps/vendor/semver-parser/src/common.rs
@@ -0,0 +1,66 @@
+use version::Identifier;
+use recognize::{Recognize, Alt, OneOrMore, ZeroOrMore, Inclusive, OneByte};
+use std::str::from_utf8;
+
+// by the time we get here, we know that it's all valid characters, so this doesn't need to return
+// a result or anything
+fn parse_meta(s: &str) -> Vec<Identifier> {
+    // Originally, I wanted to implement this method via calling parse, but parse is tolerant of
+    // leading zeroes, and we want anything with leading zeroes to be considered alphanumeric, not
+    // numeric. So the strategy is to check with a recognizer first, and then call parse once
+    // we've determined that it's a number without a leading zero.
+    s.split(".")
+        .map(|part| {
+            // another wrinkle: we made sure that any number starts with a
+            // non-zero. But there's a problem: an actual zero is a number, yet
+            // gets left out by this heuristic. So let's also check for the
+            // single, lone zero.
+            if is_alpha_numeric(part) {
+                Identifier::AlphaNumeric(part.to_string())
+            } else {
+                // we can unwrap here because the recognizer guarantees it is only digits
+                Identifier::Numeric(part.parse().unwrap())
+            }
+        }).collect()
+}
+
+// parse optional metadata (preceded by the prefix character)
+pub fn parse_optional_meta(s: &[u8], prefix_char: u8) -> Result<(Vec<Identifier>, usize), String> {
+    if let Some(len) = prefix_char.p(s) {
+        let start = len;
+        if let Some(len) = letters_numbers_dash_dot(&s[start..]) {
+            let end = start + len;
+            Ok((parse_meta(from_utf8(&s[start..end]).unwrap()), end))
+        } else {
+            Err("Error parsing prerelease".to_string())
+        }
+    } else {
+        Ok((Vec::new(), 0))
+    }
+}
+
+pub fn is_alpha_numeric(s: &str) -> bool {
+    if let Some((_val, len)) = numeric_identifier(s.as_bytes()) {
+        // Return true for number with leading zero
+        // Note: doing it this way also handily makes overflow fail over.
+        len != s.len()
+    } else {
+        true
+    }
+}
+
+// Note: could plumb overflow error up to return value as Result
+pub fn numeric_identifier(s: &[u8]) -> Option<(u64, usize)> {
+    // A numeric identifier is either a lone `0` or a nonzero digit followed
+    // by any number of digits; this is what rejects leading zeroes like `01`.
+    if let Some(len) = Alt(b'0', (Inclusive(b'1'..b'9'),
+                                  ZeroOrMore(Inclusive(b'0'..b'9')))).p(s) {
+        from_utf8(&s[0..len]).unwrap().parse().ok().map(|val| (val, len))
+    } else {
+        None
+    }
+}
+
+pub fn letters_numbers_dash_dot(s: &[u8]) -> Option<usize> {
+    OneOrMore(OneByte(|c| c == b'-' || c == b'.' ||
+        (b'0' <= c && c <= b'9') ||
+        (b'a' <= c && c <= b'z') ||
+        (b'A' <= c && c <= b'Z'))).p(s)
+}
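+
+#[cfg(test)]
+mod tests {
+    // An editor's sketch, not part of the upstream crate: these checks
+    // exercise the leading-zero rule implemented by `numeric_identifier`.
+    use super::{is_alpha_numeric, numeric_identifier};
+
+    #[test]
+    fn leading_zeroes_are_alphanumeric() {
+        // A lone zero is numeric, and "01" only matches its "0" prefix...
+        assert_eq!(numeric_identifier(b"0"), Some((0, 1)));
+        assert_eq!(numeric_identifier(b"01"), Some((0, 1)));
+        // ...so "01" as a whole is treated as alphanumeric, while "10" is an
+        // ordinary number.
+        assert!(is_alpha_numeric("01"));
+        assert!(!is_alpha_numeric("10"));
+    }
+}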
diff --git a/rustc_deps/vendor/semver-parser/src/lib.rs b/rustc_deps/vendor/semver-parser/src/lib.rs
new file mode 100644
index 0000000..3b0d8f0
--- /dev/null
+++ b/rustc_deps/vendor/semver-parser/src/lib.rs
@@ -0,0 +1,8 @@
+pub mod version;
+pub mod range;
+
+// for private stuff the two share
+mod common;
+
+// for recognizer combinators
+mod recognize;
diff --git a/rustc_deps/vendor/semver-parser/src/range.rs b/rustc_deps/vendor/semver-parser/src/range.rs
new file mode 100644
index 0000000..858be9f
--- /dev/null
+++ b/rustc_deps/vendor/semver-parser/src/range.rs
@@ -0,0 +1,696 @@
+use common::{self, numeric_identifier, letters_numbers_dash_dot};
+use version::Identifier;
+use std::str::{FromStr, from_utf8};
+use recognize::*;
+
+#[derive(Debug)]
+pub struct VersionReq {
+    pub predicates: Vec<Predicate>,
+}
+
+#[derive(PartialEq,Debug)]
+pub enum WildcardVersion {
+    Major,
+    Minor,
+    Patch,
+}
+
+#[derive(PartialEq,Debug)]
+pub enum Op {
+    Ex, // Exact
+    Gt, // Greater than
+    GtEq, // Greater than or equal to
+    Lt, // Less than
+    LtEq, // Less than or equal to
+    Tilde, // e.g. ~1.0.0
+    Compatible, // compatible by definition of semver, indicated by ^
+    Wildcard(WildcardVersion), // x.y.*, x.*, *
+}
+
+impl FromStr for Op {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Op, String> {
+        match s {
+            "=" => Ok(Op::Ex),
+            ">" => Ok(Op::Gt),
+            ">=" => Ok(Op::GtEq),
+            "<" => Ok(Op::Lt),
+            "<=" => Ok(Op::LtEq),
+            "~" => Ok(Op::Tilde),
+            "^" => Ok(Op::Compatible),
+            _ => Err(String::from("Could not parse Op")),
+        }
+    }
+}
+
+#[derive(PartialEq,Debug)]
+pub struct Predicate {
+    pub op: Op,
+    pub major: u64,
+    pub minor: Option<u64>,
+    pub patch: Option<u64>,
+    pub pre: Vec<Identifier>,
+}
+
+fn numeric_or_wild(s: &[u8]) -> Option<(Option<u64>, usize)> {
+    if let Some((val, len)) = numeric_identifier(s) {
+        Some((Some(val), len))
+    } else if let Some(len) = OneOf(b"*xX").p(s) {
+        Some((None, len))
+    } else {
+        None
+    }
+}
+
+fn dot_numeric_or_wild(s: &[u8]) -> Option<(Option<u64>, usize)> {
+    b'.'.p(s).and_then(|len|
+        numeric_or_wild(&s[len..]).map(|(val, len2)| (val, len + len2))
+    )
+}
+
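+// Note: the two-character operators must be tried before their one-character
+// prefixes (">=" before ">", "<=" before "<"); otherwise ">=1.0.0" would
+// match only ">" and the parse would then trip over the leftover "=".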
+fn operation(s: &[u8]) -> Option<(Op, usize)> {
+    if let Some(len) = "=".p(s) {
+        Some((Op::Ex, len))
+    } else if let Some(len) = ">=".p(s) {
+        Some((Op::GtEq, len))
+    } else if let Some(len) = ">".p(s) {
+        Some((Op::Gt, len))
+    } else if let Some(len) = "<=".p(s) {
+        Some((Op::LtEq, len))
+    } else if let Some(len) = "<".p(s) {
+        Some((Op::Lt, len))
+    } else if let Some(len) = "~".p(s) {
+        Some((Op::Tilde, len))
+    } else if let Some(len) = "^".p(s) {
+        Some((Op::Compatible, len))
+    } else {
+        None
+    }
+}
+
+fn whitespace(s: &[u8]) -> Option<usize> {
+    ZeroOrMore(OneOf(b"\t\r\n ")).p(s)
+}
+
+pub fn parse_predicate(range: &str) -> Result<Predicate, String> {
+    let s = range.trim().as_bytes();
+    let mut i = 0;
+    let mut operation = if let Some((op, len)) = operation(&s[i..]) {
+        i += len;
+        op
+    } else {
+        // operations default to Compatible
+        Op::Compatible
+    };
+    if let Some(len) = whitespace.p(&s[i..]) {
+        i += len;
+    }
+    let major = if let Some((major, len)) = numeric_identifier(&s[i..]) {
+        i += len;
+        major
+    } else {
+        return Err("Error parsing major version number: ".to_string());
+    };
+    let minor = if let Some((minor, len)) = dot_numeric_or_wild(&s[i..]) {
+        i += len;
+        if minor.is_none() {
+            operation = Op::Wildcard(WildcardVersion::Minor);
+        }
+        minor
+    } else {
+        None
+    };
+    let patch = if let Some((patch, len)) = dot_numeric_or_wild(&s[i..]) {
+        i += len;
+        if patch.is_none() {
+            operation = Op::Wildcard(WildcardVersion::Patch);
+        }
+        patch
+    } else {
+        None
+    };
+    let (pre, pre_len) = common::parse_optional_meta(&s[i..], b'-')?;
+    i += pre_len;
+    if let Some(len) = (b'+', letters_numbers_dash_dot).p(&s[i..]) {
+        i += len;
+    }
+    if i != s.len() {
+        return Err("Extra junk after valid predicate: ".to_string() +
+            from_utf8(&s[i..]).unwrap());
+    }
+    Ok(Predicate {
+        op: operation,
+        major: major,
+        minor: minor,
+        patch: patch,
+        pre: pre,
+    })
+}
+
+pub fn parse(ranges: &str) -> Result<VersionReq, String> {
+    // null is an error
+    if ranges == "\0" {
+        return Err(String::from("Null is not a valid VersionReq"));
+    }
+
+    // an empty range is a major version wildcard
+    // so is a lone * or x of either capitalization
+    if (ranges == "")
+    || (ranges == "*")
+    || (ranges == "x")
+    || (ranges == "X") {
+        return Ok(VersionReq {
+            predicates: vec![Predicate {
+                op: Op::Wildcard(WildcardVersion::Major),
+                major: 0,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            }],
+        });
+    }
+
+    let ranges = ranges.trim();
+
+    let predicates: Result<Vec<_>, String> = ranges
+        .split(",")
+        .map(|range| {
+            parse_predicate(range)
+        })
+        .collect();
+
+    let predicates = predicates?;
+
+    if predicates.is_empty() {
+        return Err(String::from("VersionReq did not parse properly"));
+    }
+
+    Ok(VersionReq {
+        predicates: predicates,
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use range;
+    use version::Identifier;
+
+    #[test]
+    fn test_parsing_default() {
+        let r = range::parse("1.0.0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Compatible,
+                major: 1,
+                minor: Some(0),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_exact_01() {
+        let r = range::parse("=1.0.0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Ex,
+                major: 1,
+                minor: Some(0),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_exact_02() {
+        let r = range::parse("=0.9.0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Ex,
+                major: 0,
+                minor: Some(9),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_exact_03() {
+        let r = range::parse("=0.1.0-beta2.a").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Ex,
+                major: 0,
+                minor: Some(1),
+                patch: Some(0),
+                pre: vec![Identifier::AlphaNumeric(String::from("beta2")),
+                          Identifier::AlphaNumeric(String::from("a"))],
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    pub fn test_parsing_greater_than() {
+        let r = range::parse("> 1.0.0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Gt,
+                major: 1,
+                minor: Some(0),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    pub fn test_parsing_greater_than_01() {
+        let r = range::parse(">= 1.0.0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::GtEq,
+                major: 1,
+                minor: Some(0),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    pub fn test_parsing_greater_than_02() {
+        let r = range::parse(">= 2.1.0-alpha2").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::GtEq,
+                major: 2,
+                minor: Some(1),
+                patch: Some(0),
+                pre: vec![Identifier::AlphaNumeric(String::from("alpha2"))],
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    pub fn test_parsing_less_than() {
+        let r = range::parse("< 1.0.0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Lt,
+                major: 1,
+                minor: Some(0),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    pub fn test_parsing_less_than_eq() {
+        let r = range::parse("<= 2.1.0-alpha2").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::LtEq,
+                major: 2,
+                minor: Some(1),
+                patch: Some(0),
+                pre: vec![Identifier::AlphaNumeric(String::from("alpha2"))],
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    pub fn test_parsing_tilde() {
+        let r = range::parse("~1").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Tilde,
+                major: 1,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    pub fn test_parsing_compatible() {
+        let r = range::parse("^0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Compatible,
+                major: 0,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_blank() {
+        let r = range::parse("").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Major),
+                major: 0,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_wildcard() {
+        let r = range::parse("*").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Major),
+                major: 0,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_x() {
+        let r = range::parse("x").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Major),
+                major: 0,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_capital_x() {
+        let r = range::parse("X").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Major),
+                major: 0,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_minor_wildcard_star() {
+        let r = range::parse("1.*").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Minor),
+                major: 1,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_minor_wildcard_x() {
+        let r = range::parse("1.x").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Minor),
+                major: 1,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_minor_wildcard_capital_x() {
+        let r = range::parse("1.X").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Minor),
+                major: 1,
+                minor: None,
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_patch_wildcard_star() {
+        let r = range::parse("1.2.*").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Patch),
+                major: 1,
+                minor: Some(2),
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_patch_wildcard_x() {
+        let r = range::parse("1.2.x").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Patch),
+                major: 1,
+                minor: Some(2),
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    fn test_parsing_patch_wildcard_capital_x() {
+        let r = range::parse("1.2.X").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Wildcard(WildcardVersion::Patch),
+                major: 1,
+                minor: Some(2),
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+    }
+
+    #[test]
+    pub fn test_multiple_01() {
+        let r = range::parse("> 0.0.9, <= 2.5.3").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Gt,
+                major: 0,
+                minor: Some(0),
+                patch: Some(9),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+
+        assert_eq!(Predicate {
+                op: Op::LtEq,
+                major: 2,
+                minor: Some(5),
+                patch: Some(3),
+                pre: Vec::new(),
+            },
+            r.predicates[1]
+        );
+    }
+
+    #[test]
+    pub fn test_multiple_02() {
+        let r = range::parse("0.3.0, 0.4.0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Compatible,
+                major: 0,
+                minor: Some(3),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+
+        assert_eq!(Predicate {
+                op: Op::Compatible,
+                major: 0,
+                minor: Some(4),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[1]
+        );
+    }
+
+    #[test]
+    pub fn test_multiple_03() {
+        let r = range::parse("<= 0.2.0, >= 0.5.0").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::LtEq,
+                major: 0,
+                minor: Some(2),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+
+        assert_eq!(Predicate {
+                op: Op::GtEq,
+                major: 0,
+                minor: Some(5),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[1]
+        );
+    }
+
+    #[test]
+    pub fn test_multiple_04() {
+        let r = range::parse("0.1.0, 0.1.4, 0.1.6").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::Compatible,
+                major: 0,
+                minor: Some(1),
+                patch: Some(0),
+                pre: Vec::new(),
+            },
+            r.predicates[0]
+        );
+
+        assert_eq!(Predicate {
+                op: Op::Compatible,
+                major: 0,
+                minor: Some(1),
+                patch: Some(4),
+                pre: Vec::new(),
+            },
+            r.predicates[1]
+        );
+
+        assert_eq!(Predicate {
+                op: Op::Compatible,
+                major: 0,
+                minor: Some(1),
+                patch: Some(6),
+                pre: Vec::new(),
+            },
+            r.predicates[2]
+        );
+    }
+
+    #[test]
+    pub fn test_multiple_05() {
+        let r = range::parse(">=0.5.1-alpha3, <0.6").unwrap();
+
+        assert_eq!(Predicate {
+                op: Op::GtEq,
+                major: 0,
+                minor: Some(5),
+                patch: Some(1),
+                pre: vec![Identifier::AlphaNumeric(String::from("alpha3"))],
+            },
+            r.predicates[0]
+        );
+
+        assert_eq!(Predicate {
+                op: Op::Lt,
+                major: 0,
+                minor: Some(6),
+                patch: None,
+                pre: Vec::new(),
+            },
+            r.predicates[1]
+        );
+    }
+
+    #[test]
+    fn test_parse_build_metadata_with_predicate() {
+        assert_eq!(range::parse("^1.2.3+meta").unwrap().predicates[0].op,
+                   Op::Compatible);
+        assert_eq!(range::parse("~1.2.3+meta").unwrap().predicates[0].op,
+                   Op::Tilde);
+        assert_eq!(range::parse("=1.2.3+meta").unwrap().predicates[0].op,
+                   Op::Ex);
+        assert_eq!(range::parse("<=1.2.3+meta").unwrap().predicates[0].op,
+                   Op::LtEq);
+        assert_eq!(range::parse(">=1.2.3+meta").unwrap().predicates[0].op,
+                   Op::GtEq);
+        assert_eq!(range::parse("<1.2.3+meta").unwrap().predicates[0].op,
+                   Op::Lt);
+        assert_eq!(range::parse(">1.2.3+meta").unwrap().predicates[0].op,
+                   Op::Gt);
+    }
+
+    #[test]
+    pub fn test_parse_errors() {
+        assert!(range::parse("\0").is_err());
+        assert!(range::parse(">= >= 0.0.2").is_err());
+        assert!(range::parse(">== 0.0.2").is_err());
+        assert!(range::parse("a.0.0").is_err());
+        assert!(range::parse("1.0.0-").is_err());
+        assert!(range::parse(">=").is_err());
+        assert!(range::parse("> 0.1.0,").is_err());
+        assert!(range::parse("> 0.3.0, ,").is_err());
+    }
+
+    #[test]
+    pub fn test_large_major_version() {
+        assert!(range::parse("18446744073709551617.0.0").is_err());
+    }
+
+    #[test]
+    pub fn test_large_minor_version() {
+        assert!(range::parse("0.18446744073709551617.0").is_err());
+    }
+
+    #[test]
+    pub fn test_large_patch_version() {
+        assert!(range::parse("0.0.18446744073709551617").is_err());
+    }
+}
diff --git a/rustc_deps/vendor/semver-parser/src/recognize.rs b/rustc_deps/vendor/semver-parser/src/recognize.rs
new file mode 100644
index 0000000..c0dd771
--- /dev/null
+++ b/rustc_deps/vendor/semver-parser/src/recognize.rs
@@ -0,0 +1,154 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under either of MIT or Apache License, Version 2.0,
+// at your option.
+//
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Simple recognizer combinators.
+
+// These combinators are similar to the ones in the "lang" module of
+// xi-editor, but stripped down to only what is needed here.
+
+use std::ops;
+
+pub trait Recognize {
+    fn p(&self, s: &[u8]) -> Option<usize>;
+}
+
+impl<F: Fn(&[u8]) -> Option<usize>> Recognize for F {
+    #[inline(always)]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        self(s)
+    }
+}
+
+pub struct OneByte<F>(pub F);
+
+impl<F: Fn(u8) -> bool> Recognize for OneByte<F> {
+    #[inline(always)]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        if s.is_empty() || !self.0(s[0]) {
+            None
+        } else {
+            Some(1)
+        }
+    }
+}
+
+impl Recognize for u8 {
+    #[inline(always)]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        OneByte(|b| b == *self).p(s)
+    }
+}
+
+/// Use Inclusive(a..b) to indicate an inclusive range. When a...b syntax becomes
+/// stable, we can get rid of this and switch to that.
+pub struct Inclusive<T>(pub T);
+
+impl Recognize for Inclusive<ops::Range<u8>> {
+    #[inline(always)]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        OneByte(|x| x >= self.0.start && x <= self.0.end).p(s)
+    }
+}
+
+impl<'a> Recognize for &'a [u8] {
+    #[inline(always)]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        let len = self.len();
+        if s.len() >= len && &s[..len] == *self {
+            Some(len)
+        } else {
+            None
+        }
+    }
+}
+
+impl<'a> Recognize for &'a str {
+    #[inline(always)]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        self.as_bytes().p(s)
+    }
+}
+
+impl<P1: Recognize, P2: Recognize> Recognize for (P1, P2) {
+    #[inline(always)]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        self.0.p(s).and_then(|len1|
+            self.1.p(&s[len1..]).map(|len2|
+                len1 + len2))
+    }
+}
+
+/// Choice from two heterogeneous alternatives.
+pub struct Alt<P1, P2>(pub P1, pub P2);
+
+impl<P1: Recognize, P2: Recognize> Recognize for Alt<P1, P2> {
+    #[inline(always)]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        self.0.p(s).or_else(|| self.1.p(s))
+    }
+}
+
+/// Choice from a homogeneous slice of parsers.
+pub struct OneOf<'a, P: 'a>(pub &'a [P]);
+
+impl<'a, P: Recognize> Recognize for OneOf<'a, P> {
+    #[inline]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        for p in self.0 {
+            if let Some(len) = p.p(s) {
+                return Some(len);
+            }
+        }
+        None
+    }
+}
+
+pub struct OneOrMore<P>(pub P);
+
+impl<P: Recognize> Recognize for OneOrMore<P> {
+    #[inline]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        let mut i = 0;
+        let mut count = 0;
+        while let Some(len) = self.0.p(&s[i..]) {
+            i += len;
+            count += 1;
+        }
+        if count >= 1 {
+            Some(i)
+        } else {
+            None
+        }
+    }
+}
+
+pub struct ZeroOrMore<P>(pub P);
+
+impl<P: Recognize> Recognize for ZeroOrMore<P> {
+    #[inline]
+    fn p(&self, s: &[u8]) -> Option<usize> {
+        let mut i = 0;
+        while let Some(len) = self.0.p(&s[i..]) {
+            i += len;
+        }
+        Some(i)
+    }
+}
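+
+#[cfg(test)]
+mod tests {
+    // An editor's sketch, not part of the upstream crate, showing how the
+    // combinators compose: a tuple sequences two recognizers, `Alt` tries the
+    // left alternative first, and each recognizer reports how many bytes of
+    // the input prefix it consumed.
+    use super::*;
+
+    #[test]
+    fn combinators_compose() {
+        // One or more ASCII digits: consumes "123" and stops at 'a'.
+        assert_eq!(OneOrMore(Inclusive(b'0'..b'9')).p(b"123abc"), Some(3));
+        // Sequencing with a tuple: a '-' sign followed by digits.
+        assert_eq!((b'-', OneOrMore(Inclusive(b'0'..b'9'))).p(b"-42"), Some(3));
+        // Alternation: "<=" must be listed before "<" to win on "<= 1.0.0".
+        assert_eq!(Alt("<=", "<").p(b"<= 1.0.0"), Some(2));
+        // ZeroOrMore always succeeds, possibly consuming nothing.
+        assert_eq!(ZeroOrMore(b' ').p(b"x"), Some(0));
+    }
+}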
diff --git a/rustc_deps/vendor/semver-parser/src/version.rs b/rustc_deps/vendor/semver-parser/src/version.rs
new file mode 100644
index 0000000..570f947
--- /dev/null
+++ b/rustc_deps/vendor/semver-parser/src/version.rs
@@ -0,0 +1,365 @@
+use std::fmt;
+use std::str::from_utf8;
+
+use recognize::*;
+
+use common::{self, numeric_identifier};
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Version {
+    pub major: u64,
+    pub minor: u64,
+    pub patch: u64,
+    pub pre: Vec<Identifier>,
+    pub build: Vec<Identifier>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum Identifier {
+    /// An identifier that's solely numbers.
+    Numeric(u64),
+    /// An identifier with letters and numbers.
+    AlphaNumeric(String),
+}
+
+pub fn parse(version: &str) -> Result<Version, String> {
+    let s = version.trim().as_bytes();
+    let mut i = 0;
+    let major = if let Some((major, len)) = numeric_identifier(&s[i..]) {
+        i += len;
+        major
+    } else {
+        return Err("Error parsing major identifier".to_string());
+    };
+    if let Some(len) = b'.'.p(&s[i..]) {
+        i += len;
+    } else {
+        return Err("Expected dot".to_string());
+    }
+    let minor = if let Some((minor, len)) = numeric_identifier(&s[i..]) {
+        i += len;
+        minor
+    } else {
+        return Err("Error parsing minor identifier".to_string());
+    };
+    if let Some(len) = b'.'.p(&s[i..]) {
+        i += len;
+    } else {
+        return Err("Expected dot".to_string());
+    }
+    let patch = if let Some((patch, len)) = numeric_identifier(&s[i..]) {
+        i += len;
+        patch
+    } else {
+        return Err("Error parsing patch identifier".to_string());
+    };
+    let (pre, pre_len) = common::parse_optional_meta(&s[i..], b'-')?;
+    i += pre_len;
+    let (build, build_len) = common::parse_optional_meta(&s[i..], b'+')?;
+    i += build_len;
+    if i != s.len() {
+        return Err("Extra junk after valid version: ".to_string() +
+            from_utf8(&s[i..]).unwrap());
+    }
+    Ok(Version {
+        major: major,
+        minor: minor,
+        patch: patch,
+        pre: pre,
+        build: build,
+    })
+}
+
+impl fmt::Display for Version {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        try!(write!(f, "{}.{}.{}", self.major, self.minor, self.patch));
+        if !self.pre.is_empty() {
+            let strs: Vec<_> =
+                self.pre.iter().map(ToString::to_string).collect();
+            try!(write!(f, "-{}", strs.join(".")));
+        }
+        if !self.build.is_empty() {
+            let strs: Vec<_> =
+                self.build.iter().map(ToString::to_string).collect();
+            try!(write!(f, "+{}", strs.join(".")));
+        }
+        Ok(())
+    }
+}
+
+impl fmt::Display for Identifier {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Identifier::Numeric(ref id) => id.fmt(f),
+            Identifier::AlphaNumeric(ref id) => id.fmt(f),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use version;
+    use super::*;
+
+    #[test]
+    fn parse_empty() {
+        let version = "";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "empty string incorrectly considered a valid parse");
+    }
+
+    #[test]
+    fn parse_blank() {
+        let version = "  ";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "blank string incorrectly considered a valid parse");
+    }
+
+    #[test]
+    fn parse_no_minor_patch() {
+        let version = "1";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "'{}' incorrectly considered a valid parse", version);
+    }
+
+    #[test]
+    fn parse_no_patch() {
+        let version = "1.2";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "'{}' incorrectly considered a valid parse", version);
+    }
+
+    #[test]
+    fn parse_empty_pre() {
+        let version = "1.2.3-";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "'{}' incorrectly considered a valid parse", version);
+    }
+
+    #[test]
+    fn parse_letters() {
+        let version = "a.b.c";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "'{}' incorrectly considered a valid parse", version);
+    }
+
+    #[test]
+    fn parse_with_letters() {
+        let version = "1.2.3 a.b.c";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "'{}' incorrectly considered a valid parse", version);
+    }
+
+    #[test]
+    fn parse_basic_version() {
+        let version = "1.2.3";
+
+        let parsed = version::parse(version).unwrap();
+
+        assert_eq!(1, parsed.major);
+        assert_eq!(2, parsed.minor);
+        assert_eq!(3, parsed.patch);
+    }
+
+    #[test]
+    fn parse_trims_input() {
+        let version = "  1.2.3  ";
+
+        let parsed = version::parse(version).unwrap();
+
+        assert_eq!(1, parsed.major);
+        assert_eq!(2, parsed.minor);
+        assert_eq!(3, parsed.patch);
+    }
+
+    #[test]
+    fn parse_no_major_leading_zeroes() {
+        let version = "01.0.0";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "01 incorrectly considered a valid major version");
+    }
+
+    #[test]
+    fn parse_no_minor_leading_zeroes() {
+        let version = "0.01.0";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "01 incorrectly considered a valid minor version");
+    }
+
+    #[test]
+    fn parse_no_patch_leading_zeroes() {
+        let version = "0.0.01";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "01 incorrectly considered a valid patch version");
+    }
+
+    #[test]
+    fn parse_no_major_overflow() {
+        let version = "98765432109876543210.0.0";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "98765432109876543210 incorrectly considered a valid major version");
+    }
+
+    #[test]
+    fn parse_no_minor_overflow() {
+        let version = "0.98765432109876543210.0";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "98765432109876543210 incorrectly considered a valid minor version");
+    }
+
+    #[test]
+    fn parse_no_patch_overflow() {
+        let version = "0.0.98765432109876543210";
+
+        let parsed = version::parse(version);
+
+        assert!(parsed.is_err(), "98765432109876543210 incorrectly considered a valid patch version");
+    }
+
+    #[test]
+    fn parse_basic_prerelease() {
+        let version = "1.2.3-pre";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_pre = vec![Identifier::AlphaNumeric(String::from("pre"))];
+        assert_eq!(expected_pre, parsed.pre);
+    }
+
+    #[test]
+    fn parse_prerelease_alphanumeric() {
+        let version = "1.2.3-alpha1";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_pre = vec![Identifier::AlphaNumeric(String::from("alpha1"))];
+        assert_eq!(expected_pre, parsed.pre);
+    }
+
+    #[test]
+    fn parse_prerelease_zero() {
+        let version = "1.2.3-pre.0";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_pre = vec![Identifier::AlphaNumeric(String::from("pre")),
+                                Identifier::Numeric(0)];
+        assert_eq!(expected_pre, parsed.pre);
+    }
+
+    #[test]
+    fn parse_basic_build() {
+        let version = "1.2.3+build";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_build = vec![Identifier::AlphaNumeric(String::from("build"))];
+        assert_eq!(expected_build, parsed.build);
+    }
+
+    #[test]
+    fn parse_build_alphanumeric() {
+        let version = "1.2.3+build5";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_build = vec![Identifier::AlphaNumeric(String::from("build5"))];
+        assert_eq!(expected_build, parsed.build);
+    }
+
+    #[test]
+    fn parse_pre_and_build() {
+        let version = "1.2.3-alpha1+build5";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_pre = vec![Identifier::AlphaNumeric(String::from("alpha1"))];
+        assert_eq!(expected_pre, parsed.pre);
+
+        let expected_build = vec![Identifier::AlphaNumeric(String::from("build5"))];
+        assert_eq!(expected_build, parsed.build);
+    }
+
+    #[test]
+    fn parse_complex_metadata_01() {
+        let version = "1.2.3-1.alpha1.9+build5.7.3aedf  ";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_pre = vec![Identifier::Numeric(1),
+                                Identifier::AlphaNumeric(String::from("alpha1")),
+                                Identifier::Numeric(9)];
+        assert_eq!(expected_pre, parsed.pre);
+
+        let expected_build = vec![Identifier::AlphaNumeric(String::from("build5")),
+                                  Identifier::Numeric(7),
+                                  Identifier::AlphaNumeric(String::from("3aedf"))];
+        assert_eq!(expected_build, parsed.build);
+    }
+
+    #[test]
+    fn parse_complex_metadata_02() {
+        let version = "0.4.0-beta.1+0851523";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_pre = vec![Identifier::AlphaNumeric(String::from("beta")),
+                                Identifier::Numeric(1)];
+        assert_eq!(expected_pre, parsed.pre);
+
+        let expected_build = vec![Identifier::AlphaNumeric(String::from("0851523"))];
+        assert_eq!(expected_build, parsed.build);
+    }
+
+    #[test]
+    fn parse_metadata_overflow() {
+        let version = "0.4.0-beta.1+98765432109876543210";
+
+        let parsed = version::parse(version).unwrap();
+
+        let expected_pre = vec![Identifier::AlphaNumeric(String::from("beta")),
+                                Identifier::Numeric(1)];
+        assert_eq!(expected_pre, parsed.pre);
+
+        let expected_build = vec![Identifier::AlphaNumeric(String::from("98765432109876543210"))];
+        assert_eq!(expected_build, parsed.build);
+    }
+
+    #[test]
+    fn parse_regression_01() {
+        let version = "0.0.0-WIP";
+
+        let parsed = version::parse(version).unwrap();
+
+        assert_eq!(0, parsed.major);
+        assert_eq!(0, parsed.minor);
+        assert_eq!(0, parsed.patch);
+
+        let expected_pre = vec![Identifier::AlphaNumeric(String::from("WIP"))];
+        assert_eq!(expected_pre, parsed.pre);
+    }
+}
diff --git a/rustc_deps/vendor/semver/.cargo-checksum.json b/rustc_deps/vendor/semver/.cargo-checksum.json
new file mode 100644
index 0000000..2f9af6e
--- /dev/null
+++ b/rustc_deps/vendor/semver/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"a5b995796b5559de8975a6fee7166c9fda6c21b449ec90bef5f9baaeddd479a5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"c780d8c3c802c5fe2c316127900385010c3e57f71c851eea9e8ed8495e2030dd","src/lib.rs":"cb1725a8bb90c1043f187c6ba504d0a9d07793e2f39f5205f926c58849311770","src/version.rs":"ffdf9c628597b889f149f3b2b1245b97c774eae1ce7030bd19235eabecaaede0","src/version_req.rs":"40d20720f5fdc0b3d9e398e64eb448a65987229bd322cab0fedf0cf1843f3bd8","tests/deprecation.rs":"b5ec79e19d61968d05b96b876c449e54d43cbd1762c6e63c23c3470f9db56292","tests/regression.rs":"180b699ad029b81e6135d42f0a8e6d782177bc29a41132f875ee6f8607a46b56","tests/serde.rs":"cdbbefc576ffcc814c30dad9598ab87a7fd9d14c5f42f1349e1db6afc72f8fed"},"package":"1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/semver/Cargo.toml b/rustc_deps/vendor/semver/Cargo.toml
new file mode 100644
index 0000000..7749f76
--- /dev/null
+++ b/rustc_deps/vendor/semver/Cargo.toml
@@ -0,0 +1,45 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "semver"
+version = "0.9.0"
+authors = ["Steve Klabnik <steve@steveklabnik.com>", "The Rust Project Developers"]
+description = "Semantic version parsing and comparison.\n"
+homepage = "https://docs.rs/crate/semver/"
+documentation = "https://docs.rs/crate/semver/"
+readme = "README.md"
+license = "MIT/Apache-2.0"
+repository = "https://github.com/steveklabnik/semver"
+[dependencies.semver-parser]
+version = "0.7.0"
+
+[dependencies.serde]
+version = "1.0"
+optional = true
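+# note: as an optional dependency, this also defines an implicit `serde`
+# feature that consumers can enable for the crate's serde impls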
+[dev-dependencies.crates-index]
+version = "0.5.0"
+
+[dev-dependencies.serde_json]
+version = "1.0"
+
+[dev-dependencies.serde_derive]
+version = "1.0"
+
+[dev-dependencies.tempdir]
+version = "0.3.4"
+
+[features]
+default = []
+ci = ["serde"]
+[badges.travis-ci]
+repository = "steveklabnik/semver"
diff --git a/rustc_deps/vendor/parking_lot_core/LICENSE b/rustc_deps/vendor/semver/LICENSE-APACHE
similarity index 88%
copy from rustc_deps/vendor/parking_lot_core/LICENSE
copy to rustc_deps/vendor/semver/LICENSE-APACHE
index 5a35ba9..16fe87b 100644
--- a/rustc_deps/vendor/parking_lot_core/LICENSE
+++ b/rustc_deps/vendor/semver/LICENSE-APACHE
@@ -1,5 +1,3 @@
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-APACHE:
-
                               Apache License
                         Version 2.0, January 2004
                      http://www.apache.org/licenses/
@@ -201,31 +199,3 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-========================================
-https://raw.githubusercontent.com/Amanieu/parking_lot/master/LICENSE-MIT:
-
-Copyright (c) 2016 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/semver/LICENSE-MIT b/rustc_deps/vendor/semver/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rustc_deps/vendor/semver/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/semver/README.md b/rustc_deps/vendor/semver/README.md
new file mode 100644
index 0000000..2a5306d
--- /dev/null
+++ b/rustc_deps/vendor/semver/README.md
@@ -0,0 +1,103 @@
+semver
+======
+
+Semantic version parsing and comparison.
+
+[![Build Status](https://api.travis-ci.org/steveklabnik/semver.svg?branch=master)](https://travis-ci.org/steveklabnik/semver)
+
+[Documentation](https://steveklabnik.github.io/semver)
+
+Semantic versioning (see http://semver.org/) is a set of rules for
+assigning version numbers.
+
+## SemVer and the Rust ecosystem
+
+Rust itself follows the SemVer specification, as do its standard libraries. The two are
+not tied together.
+
+[Cargo](https://crates.io), Rust's package manager, uses SemVer to determine which versions of
+packages you need installed.
+
+## Installation
+
+To use `semver`, add this to your `[dependencies]` section:
+
+```toml
+semver = "0.7.0"
+```
+
+And this to your crate root:
+
+```rust
+extern crate semver;
+```
+
+## Versions
+
+At its simplest, the `semver` crate allows you to construct `Version` objects using the `parse`
+method:
+
+```rust
+use semver::Version;
+
+assert!(Version::parse("1.2.3") == Ok(Version {
+   major: 1,
+   minor: 2,
+   patch: 3,
+   pre: vec!(),
+   build: vec!(),
+}));
+```
+
+If you have multiple `Version`s, you can use the usual comparison operators to compare them:
+
+```rust
+use semver::Version;
+
+assert!(Version::parse("1.2.3-alpha")  != Version::parse("1.2.3-beta"));
+assert!(Version::parse("1.2.3-alpha2") >  Version::parse("1.2.0"));
+```
+
+## Requirements
+
+The `semver` crate also provides the ability to compare requirements, which are more complex
+comparisons.
+
+For example, creating a requirement that only matches versions greater than or
+equal to 1.0.0:
+
+```rust
+use semver::Version;
+use semver::VersionReq;
+
+let r = VersionReq::parse(">= 1.0.0").unwrap();
+let v = Version::parse("1.0.0").unwrap();
+
+assert!(r.to_string() == ">= 1.0.0".to_string());
+assert!(r.matches(&v))
+```
+
+It also allows parsing of `~x.y.z` and `^x.y.z` requirements as defined at
+https://www.npmjs.org/doc/misc/semver.html
+
+**Tilde requirements** specify a minimal version with some updates:
+
+```notrust
+~1.2.3 := >=1.2.3 <1.3.0
+~1.2   := >=1.2.0 <1.3.0
+~1     := >=1.0.0 <2.0.0
+```
+
+**Caret requirements** allow SemVer-compatible updates to a specified version:
+`0.x` and `0.x+1` are not considered compatible, but `1.x` and `1.x+1` are.
+
+`0.0.x` is not considered compatible with any other version.
+Missing minor and patch versions are desugared to `0`, but still allow flexibility in that position.
+
+```notrust
+^1.2.3 := >=1.2.3 <2.0.0
+^0.2.3 := >=0.2.3 <0.3.0
+^0.0.3 := >=0.0.3 <0.0.4
+^0.0   := >=0.0.0 <0.1.0
+^0     := >=0.0.0 <1.0.0
+```
diff --git a/rustc_deps/vendor/semver/src/lib.rs b/rustc_deps/vendor/semver/src/lib.rs
new file mode 100644
index 0000000..a38aae0
--- /dev/null
+++ b/rustc_deps/vendor/semver/src/lib.rs
@@ -0,0 +1,182 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Semantic version parsing and comparison.
+//!
+//! Semantic versioning (see http://semver.org/) is a set of rules for
+//! assigning version numbers.
+//!
+//! ## SemVer overview
+//!
+//! Given a version number MAJOR.MINOR.PATCH, increment the:
+//!
+//! 1. MAJOR version when you make incompatible API changes,
+//! 2. MINOR version when you add functionality in a backwards-compatible
+//!    manner, and
+//! 3. PATCH version when you make backwards-compatible bug fixes.
+//!
+//! Additional labels for pre-release and build metadata are available as
+//! extensions to the MAJOR.MINOR.PATCH format.
+//!
+//! Any references to 'the spec' in this documentation refer to [version 2.0 of
+//! the SemVer spec](http://semver.org/spec/v2.0.0.html).
+//!
+//! ## SemVer and the Rust ecosystem
+//!
+//! Rust itself follows the SemVer specification, as do its standard
+//! libraries. The two are not tied together.
+//!
+//! [Cargo](http://crates.io), Rust's package manager, uses SemVer to determine
+//! which versions of packages you need installed.
+//!
+//! ## Versions
+//!
+//! At its simplest, the `semver` crate allows you to construct `Version`
+//! objects using the `parse` method:
+//!
+//! ```{rust}
+//! use semver::Version;
+//!
+//! assert!(Version::parse("1.2.3") == Ok(Version {
+//!    major: 1,
+//!    minor: 2,
+//!    patch: 3,
+//!    pre: vec!(),
+//!    build: vec!(),
+//! }));
+//! ```
+//!
+//! If you have multiple `Version`s, you can use the usual comparison operators
+//! to compare them:
+//!
+//! ```{rust}
+//! use semver::Version;
+//!
+//! assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta"));
+//! assert!(Version::parse("1.2.3-alpha2") >  Version::parse("1.2.0"));
+//! ```
+//!
+//! If you explicitly need to modify a Version, SemVer also allows you to
+//! increment the major, minor, and patch numbers in accordance with the spec.
+//!
+//! Please note that in order to do this, you must use a mutable Version:
+//!
+//! ```{rust}
+//! use semver::Version;
+//!
+//! let mut bugfix_release = Version::parse("1.0.0").unwrap();
+//! bugfix_release.increment_patch();
+//!
+//! assert_eq!(Ok(bugfix_release), Version::parse("1.0.1"));
+//! ```
+//!
+//! When incrementing the minor version number, the patch number resets to zero
+//! (in accordance with section 7 of the spec)
+//!
+//! ```{rust}
+//! use semver::Version;
+//!
+//! let mut feature_release = Version::parse("1.4.6").unwrap();
+//! feature_release.increment_minor();
+//!
+//! assert_eq!(Ok(feature_release), Version::parse("1.5.0"));
+//! ```
+//!
+//! Similarly, when incrementing the major version number, the patch and minor
+//! numbers reset to zero (in accordance with section 8 of the spec)
+//!
+//! ```{rust}
+//! use semver::Version;
+//!
+//! let mut chrome_release = Version::parse("41.5.5377").unwrap();
+//! chrome_release.increment_major();
+//!
+//! assert_eq!(Ok(chrome_release), Version::parse("42.0.0"));
+//! ```
+//!
+//! ## Requirements
+//!
+//! The `semver` crate also provides the ability to compare requirements, which
+//! are more complex comparisons.
+//!
+//! For example, creating a requirement that only matches versions greater than
+//! or equal to 1.0.0:
+//!
+//! ```{rust}
+//! # #![allow(unstable)]
+//! use semver::Version;
+//! use semver::VersionReq;
+//!
+//! let r = VersionReq::parse(">= 1.0.0").unwrap();
+//! let v = Version::parse("1.0.0").unwrap();
+//!
+//! assert!(r.to_string() == ">= 1.0.0".to_string());
+//! assert!(r.matches(&v))
+//! ```
+//!
+//! It also allows parsing of `~x.y.z` and `^x.y.z` requirements as defined at
+//! https://www.npmjs.org/doc/misc/semver.html
+//!
+//! **Tilde requirements** specify a minimal version with some updates:
+//!
+//! ```notrust
+//! ~1.2.3 := >=1.2.3 <1.3.0
+//! ~1.2   := >=1.2.0 <1.3.0
+//! ~1     := >=1.0.0 <2.0.0
+//! ```
+//!
+//! **Caret requirements** allow SemVer-compatible updates to a specified
+//! version: `0.x` and `0.x+1` are not considered compatible, but `1.x` and
+//! `1.x+1` are.
+//!
+//! `0.0.x` is not considered compatible with any other version.
+//! Missing minor and patch versions are desugared to `0`, but still allow
+//! flexibility in that position.
+//!
+//! ```notrust
+//! ^1.2.3 := >=1.2.3 <2.0.0
+//! ^0.2.3 := >=0.2.3 <0.3.0
+//! ^0.0.3 := >=0.0.3 <0.0.4
+//! ^0.0   := >=0.0.0 <0.1.0
+//! ^0     := >=0.0.0 <1.0.0
+//! ```
+//!
+//! **Wildcard requirements** allow parsing of version requirements of the
+//! formats `*`, `x.*` and `x.y.*`.
+//!
+//! ```notrust
+//! *     := >=0.0.0
+//! 1.*   := >=1.0.0 <2.0.0
+//! 1.2.* := >=1.2.0 <1.3.0
+//! ```
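+//!
+//! For example (an illustrative sketch):
+//!
+//! ```{rust}
+//! use semver::{Version, VersionReq};
+//!
+//! let wild = VersionReq::parse("1.2.*").unwrap();
+//! assert!(wild.matches(&Version::parse("1.2.9").unwrap()));
+//! assert!(!wild.matches(&Version::parse("1.3.0").unwrap()));
+//! ```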
+
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+       html_favicon_url = "https://www.rust-lang.org/favicon.ico")]
+#![deny(missing_docs)]
+#![cfg_attr(test, deny(warnings))]
+
+extern crate semver_parser;
+
+// Serialization and deserialization support for version numbers
+#[cfg(feature = "serde")]
+extern crate serde;
+
+// We take the common approach of keeping our own module system private, and
+// just re-exporting the interface that we want.
+
+pub use version::{Version, Identifier, SemVerError};
+pub use version::Identifier::{Numeric, AlphaNumeric};
+pub use version_req::{VersionReq, ReqParseError};
+
+// SemVer-compliant versions.
+mod version;
+
+// advanced version comparisons
+mod version_req;
diff --git a/rustc_deps/vendor/semver/src/version.rs b/rustc_deps/vendor/semver/src/version.rs
new file mode 100644
index 0000000..38de133
--- /dev/null
+++ b/rustc_deps/vendor/semver/src/version.rs
@@ -0,0 +1,759 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The `version` module gives you tools to create and compare SemVer-compliant
+//! versions.
+
+use std::cmp::{self, Ordering};
+use std::fmt;
+use std::hash;
+use std::error::Error;
+
+use std::result;
+use std::str;
+
+use semver_parser;
+
+#[cfg(feature = "serde")]
+use serde::ser::{Serialize, Serializer};
+#[cfg(feature = "serde")]
+use serde::de::{self, Deserialize, Deserializer, Visitor};
+
+/// An identifier in the pre-release or build metadata.
+///
+/// See sections 9 and 10 of the spec for more about pre-release identifiers and
+/// build metadata.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub enum Identifier {
+    /// An identifier that's solely numbers.
+    Numeric(u64),
+    /// An identifier with letters and numbers.
+    AlphaNumeric(String),
+}
+
+impl From<semver_parser::version::Identifier> for Identifier {
+    fn from(other: semver_parser::version::Identifier) -> Identifier {
+        match other {
+            semver_parser::version::Identifier::Numeric(n) => Identifier::Numeric(n),
+            semver_parser::version::Identifier::AlphaNumeric(s) => Identifier::AlphaNumeric(s),
+        }
+    }
+}
+
+impl fmt::Display for Identifier {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Identifier::Numeric(ref n) => fmt::Display::fmt(n, f),
+            Identifier::AlphaNumeric(ref s) => fmt::Display::fmt(s, f),
+        }
+    }
+}
+
+#[cfg(feature = "serde")]
+impl Serialize for Identifier {
+    fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
+        where S: Serializer
+    {
+        // Serialize Identifier as a number or string.
+        match *self {
+            Identifier::Numeric(n) => serializer.serialize_u64(n),
+            Identifier::AlphaNumeric(ref s) => serializer.serialize_str(s),
+        }
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> Deserialize<'de> for Identifier {
+    fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
+        where D: Deserializer<'de>
+    {
+        struct IdentifierVisitor;
+
+        // Deserialize Identifier from a number or string.
+        impl<'de> Visitor<'de> for IdentifierVisitor {
+            type Value = Identifier;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("a SemVer pre-release or build identifier")
+            }
+
+            fn visit_u64<E>(self, numeric: u64) -> result::Result<Self::Value, E>
+                where E: de::Error
+            {
+                Ok(Identifier::Numeric(numeric))
+            }
+
+            fn visit_str<E>(self, alphanumeric: &str) -> result::Result<Self::Value, E>
+                where E: de::Error
+            {
+                Ok(Identifier::AlphaNumeric(alphanumeric.to_owned()))
+            }
+        }
+
+        deserializer.deserialize_any(IdentifierVisitor)
+    }
+}
+
+/// Represents a version number conforming to the semantic versioning scheme.
+#[derive(Clone, Eq, Debug)]
+pub struct Version {
+    /// The major version, to be incremented on incompatible changes.
+    pub major: u64,
+    /// The minor version, to be incremented when functionality is added in a
+    /// backwards-compatible manner.
+    pub minor: u64,
+    /// The patch version, to be incremented when backwards-compatible bug
+    /// fixes are made.
+    pub patch: u64,
+    /// The pre-release version identifier, if one exists.
+    pub pre: Vec<Identifier>,
+    /// The build metadata, ignored when determining version precedence.
+    pub build: Vec<Identifier>,
+}
+
+impl From<semver_parser::version::Version> for Version {
+    fn from(other: semver_parser::version::Version) -> Version {
+        Version {
+            major: other.major,
+            minor: other.minor,
+            patch: other.patch,
+            pre: other.pre.into_iter().map(From::from).collect(),
+            build: other.build.into_iter().map(From::from).collect(),
+        }
+    }
+}
+
+#[cfg(feature = "serde")]
+impl Serialize for Version {
+    fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
+        where S: Serializer
+    {
+        // Serialize Version as a string.
+        serializer.collect_str(self)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> Deserialize<'de> for Version {
+    fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
+        where D: Deserializer<'de>
+    {
+        struct VersionVisitor;
+
+        // Deserialize Version from a string.
+        impl<'de> Visitor<'de> for VersionVisitor {
+            type Value = Version;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("a SemVer version as a string")
+            }
+
+            fn visit_str<E>(self, v: &str) -> result::Result<Self::Value, E>
+                where E: de::Error
+            {
+                Version::parse(v).map_err(de::Error::custom)
+            }
+        }
+
+        deserializer.deserialize_str(VersionVisitor)
+    }
+}
+
+/// An error type for this crate
+///
+/// Currently, just a generic error. Will make this nicer later.
+#[derive(Clone,PartialEq,Debug,PartialOrd)]
+pub enum SemVerError {
+    /// An error occurred while parsing.
+    ParseError(String),
+}
+
+impl fmt::Display for SemVerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            &SemVerError::ParseError(ref m) => write!(f, "{}", m),
+        }
+    }
+}
+
+impl Error for SemVerError {
+    fn description(&self) -> &str {
+        match self {
+            &SemVerError::ParseError(ref m) => m,
+        }
+    }
+}
+
+/// A Result type for errors
+pub type Result<T> = result::Result<T, SemVerError>;
+
+impl Version {
+
+    /// Constructs the simple case without pre-release or build metadata.
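+    ///
+    /// For example, `Version::new(1, 2, 3)` is equivalent to parsing
+    /// `"1.2.3"`.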
+    pub fn new(major: u64, minor: u64, patch: u64) -> Version {
+        Version {
+            major: major,
+            minor: minor,
+            patch: patch,
+            pre: Vec::new(),
+            build: Vec::new()
+        }
+    }
+
+    /// Parse a string into a semver object.
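+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// use semver::Version;
+    ///
+    /// assert_eq!(Version::parse("1.2.3"), Ok(Version::new(1, 2, 3)));
+    /// assert!(Version::parse("not a version").is_err());
+    /// ```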
+    pub fn parse(version: &str) -> Result<Version> {
+        let res = semver_parser::version::parse(version);
+
+        match res {
+            // Convert plain String error into proper ParseError
+            Err(e) => Err(SemVerError::ParseError(e)),
+            Ok(v) => Ok(From::from(v)),
+        }
+    }
+
+    /// Clears the build metadata
+    fn clear_metadata(&mut self) {
+        self.build = Vec::new();
+        self.pre = Vec::new();
+    }
+
+    /// Increments the patch number for this Version (Must be mutable)
+    pub fn increment_patch(&mut self) {
+        self.patch += 1;
+        self.clear_metadata();
+    }
+
+    /// Increments the minor version number for this Version (Must be mutable)
+    ///
+    /// As instructed by section 7 of the spec, the patch number is reset to 0.
+    pub fn increment_minor(&mut self) {
+        self.minor += 1;
+        self.patch = 0;
+        self.clear_metadata();
+    }
+
+    /// Increments the major version number for this Version (Must be mutable)
+    ///
+    /// As instructed by section 8 of the spec, the minor and patch numbers are
+    /// reset to 0
+    pub fn increment_major(&mut self) {
+        self.major += 1;
+        self.minor = 0;
+        self.patch = 0;
+        self.clear_metadata();
+    }
+
+    /// Checks to see if the current Version is in pre-release status
+    pub fn is_prerelease(&self) -> bool {
+        !self.pre.is_empty()
+    }
+}
+
+impl str::FromStr for Version {
+    type Err = SemVerError;
+
+    fn from_str(s: &str) -> Result<Version> {
+        Version::parse(s)
+    }
+}
+
+impl fmt::Display for Version {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        try!(write!(f, "{}.{}.{}", self.major, self.minor, self.patch));
+        if !self.pre.is_empty() {
+            try!(write!(f, "-"));
+            for (i, x) in self.pre.iter().enumerate() {
+                if i != 0 {
+                    try!(write!(f, "."))
+                }
+                try!(write!(f, "{}", x));
+            }
+        }
+        if !self.build.is_empty() {
+            try!(write!(f, "+"));
+            for (i, x) in self.build.iter().enumerate() {
+                if i != 0 {
+                    try!(write!(f, "."))
+                }
+                try!(write!(f, "{}", x));
+            }
+        }
+        Ok(())
+    }
+}
+
+impl cmp::PartialEq for Version {
+    #[inline]
+    fn eq(&self, other: &Version) -> bool {
+        // We should ignore build metadata here, otherwise versions v1 and v2
+        // can exist such that !(v1 < v2) && !(v1 > v2) && v1 != v2, which
+        // violate strict total ordering rules.
+        self.major == other.major && self.minor == other.minor && self.patch == other.patch &&
+        self.pre == other.pre
+    }
+}
+
+impl cmp::PartialOrd for Version {
+    fn partial_cmp(&self, other: &Version) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl cmp::Ord for Version {
+    fn cmp(&self, other: &Version) -> Ordering {
+        match self.major.cmp(&other.major) {
+            Ordering::Equal => {}
+            r => return r,
+        }
+
+        match self.minor.cmp(&other.minor) {
+            Ordering::Equal => {}
+            r => return r,
+        }
+
+        match self.patch.cmp(&other.patch) {
+            Ordering::Equal => {}
+            r => return r,
+        }
+
+        // NB: The semver spec says 0.0.0-pre < 0.0.0, but the `Ord`
+        // implementation for `Vec` says [] < [pre], so we special-case
+        // empty prerelease lists here.
+        match (self.pre.len(), other.pre.len()) {
+            (0, 0) => Ordering::Equal,
+            (0, _) => Ordering::Greater,
+            (_, 0) => Ordering::Less,
+            (_, _) => self.pre.cmp(&other.pre),
+        }
+    }
+}
+
+impl hash::Hash for Version {
+    fn hash<H: hash::Hasher>(&self, into: &mut H) {
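+        // Build metadata is deliberately excluded, mirroring `PartialEq`
+        // above, so that versions which compare equal also hash equally.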
+        self.major.hash(into);
+        self.minor.hash(into);
+        self.patch.hash(into);
+        self.pre.hash(into);
+    }
+}
+
+impl From<(u64,u64,u64)> for Version {
+    fn from(tuple: (u64,u64,u64)) -> Version {
+        let (major, minor, patch) = tuple;
+        Version::new(major, minor, patch)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::result;
+    use super::Version;
+    use super::Identifier;
+    use super::SemVerError;
+
+    #[test]
+    fn test_parse() {
+        fn parse_error(e: &str) -> result::Result<Version, SemVerError> {
+            return Err(SemVerError::ParseError(e.to_string()));
+        }
+
+        assert_eq!(Version::parse(""),
+                   parse_error("Error parsing major identifier"));
+        assert_eq!(Version::parse("  "),
+                   parse_error("Error parsing major identifier"));
+        assert_eq!(Version::parse("1"),
+                   parse_error("Expected dot"));
+        assert_eq!(Version::parse("1.2"),
+                   parse_error("Expected dot"));
+        assert_eq!(Version::parse("1.2.3-"),
+                   parse_error("Error parsing prerelease"));
+        assert_eq!(Version::parse("a.b.c"),
+                   parse_error("Error parsing major identifier"));
+        assert_eq!(Version::parse("1.2.3 abc"),
+                   parse_error("Extra junk after valid version:  abc"));
+
+        assert_eq!(Version::parse("1.2.3"),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: Vec::new(),
+                       build: Vec::new(),
+                   }));
+
+        assert_eq!(Version::parse("1.2.3"),
+                   Ok(Version::new(1,2,3)));
+
+        assert_eq!(Version::parse("  1.2.3  "),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: Vec::new(),
+                       build: Vec::new(),
+                   }));
+        assert_eq!(Version::parse("1.2.3-alpha1"),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))],
+                       build: Vec::new(),
+                   }));
+        assert_eq!(Version::parse("  1.2.3-alpha1  "),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))],
+                       build: Vec::new(),
+                   }));
+        assert_eq!(Version::parse("1.2.3+build5"),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: Vec::new(),
+                       build: vec![Identifier::AlphaNumeric(String::from("build5"))],
+                   }));
+        assert_eq!(Version::parse("  1.2.3+build5  "),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: Vec::new(),
+                       build: vec![Identifier::AlphaNumeric(String::from("build5"))],
+                   }));
+        assert_eq!(Version::parse("1.2.3-alpha1+build5"),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))],
+                       build: vec![Identifier::AlphaNumeric(String::from("build5"))],
+                   }));
+        assert_eq!(Version::parse("  1.2.3-alpha1+build5  "),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))],
+                       build: vec![Identifier::AlphaNumeric(String::from("build5"))],
+                   }));
+        assert_eq!(Version::parse("1.2.3-1.alpha1.9+build5.7.3aedf  "),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::Numeric(1),
+                                 Identifier::AlphaNumeric(String::from("alpha1")),
+                                 Identifier::Numeric(9)],
+                       build: vec![Identifier::AlphaNumeric(String::from("build5")),
+                                   Identifier::Numeric(7),
+                                   Identifier::AlphaNumeric(String::from("3aedf"))],
+                   }));
+        assert_eq!(Version::parse("0.4.0-beta.1+0851523"),
+                   Ok(Version {
+                       major: 0,
+                       minor: 4,
+                       patch: 0,
+                       pre: vec![Identifier::AlphaNumeric(String::from("beta")),
+                      Identifier::Numeric(1),
+            ],
+                       build: vec![Identifier::AlphaNumeric(String::from("0851523"))],
+                   }));
+
+    }
+
+    #[test]
+    fn test_increment_patch() {
+        let mut buggy_release = Version::parse("0.1.0").unwrap();
+        buggy_release.increment_patch();
+        assert_eq!(buggy_release, Version::parse("0.1.1").unwrap());
+    }
+
+    #[test]
+    fn test_increment_minor() {
+        let mut feature_release = Version::parse("1.4.6").unwrap();
+        feature_release.increment_minor();
+        assert_eq!(feature_release, Version::parse("1.5.0").unwrap());
+    }
+
+    #[test]
+    fn test_increment_major() {
+        let mut chrome_release = Version::parse("46.1.246773").unwrap();
+        chrome_release.increment_major();
+        assert_eq!(chrome_release, Version::parse("47.0.0").unwrap());
+    }
+
+    #[test]
+    fn test_increment_keep_prerelease() {
+        let mut release = Version::parse("1.0.0-alpha").unwrap();
+        release.increment_patch();
+
+        assert_eq!(release, Version::parse("1.0.1").unwrap());
+
+        release.increment_minor();
+
+        assert_eq!(release, Version::parse("1.1.0").unwrap());
+
+        release.increment_major();
+
+        assert_eq!(release, Version::parse("2.0.0").unwrap());
+    }
+
+
+    #[test]
+    fn test_increment_clear_metadata() {
+        let mut release = Version::parse("1.0.0+4442").unwrap();
+        release.increment_patch();
+
+        assert_eq!(release, Version::parse("1.0.1").unwrap());
+        release = Version::parse("1.0.1+hello").unwrap();
+
+        release.increment_minor();
+
+        assert_eq!(release, Version::parse("1.1.0").unwrap());
+        release = Version::parse("1.1.3747+hello").unwrap();
+
+        release.increment_major();
+
+        assert_eq!(release, Version::parse("2.0.0").unwrap());
+    }
+
+    #[test]
+    fn test_eq() {
+        assert_eq!(Version::parse("1.2.3"), Version::parse("1.2.3"));
+        assert_eq!(Version::parse("1.2.3-alpha1"),
+                   Version::parse("1.2.3-alpha1"));
+        assert_eq!(Version::parse("1.2.3+build.42"),
+                   Version::parse("1.2.3+build.42"));
+        assert_eq!(Version::parse("1.2.3-alpha1+42"),
+                   Version::parse("1.2.3-alpha1+42"));
+        assert_eq!(Version::parse("1.2.3+23"), Version::parse("1.2.3+42"));
+    }
+
+    #[test]
+    fn test_ne() {
+        assert!(Version::parse("0.0.0") != Version::parse("0.0.1"));
+        assert!(Version::parse("0.0.0") != Version::parse("0.1.0"));
+        assert!(Version::parse("0.0.0") != Version::parse("1.0.0"));
+        assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta"));
+    }
+
+    #[test]
+    fn test_show() {
+        assert_eq!(format!("{}", Version::parse("1.2.3").unwrap()),
+                   "1.2.3".to_string());
+        assert_eq!(format!("{}", Version::parse("1.2.3-alpha1").unwrap()),
+                   "1.2.3-alpha1".to_string());
+        assert_eq!(format!("{}", Version::parse("1.2.3+build.42").unwrap()),
+                   "1.2.3+build.42".to_string());
+        assert_eq!(format!("{}", Version::parse("1.2.3-alpha1+42").unwrap()),
+                   "1.2.3-alpha1+42".to_string());
+    }
+
+    #[test]
+    fn test_to_string() {
+        assert_eq!(Version::parse("1.2.3").unwrap().to_string(),
+                   "1.2.3".to_string());
+        assert_eq!(Version::parse("1.2.3-alpha1").unwrap().to_string(),
+                   "1.2.3-alpha1".to_string());
+        assert_eq!(Version::parse("1.2.3+build.42").unwrap().to_string(),
+                   "1.2.3+build.42".to_string());
+        assert_eq!(Version::parse("1.2.3-alpha1+42").unwrap().to_string(),
+                   "1.2.3-alpha1+42".to_string());
+    }
+
+    #[test]
+    fn test_lt() {
+        assert!(Version::parse("0.0.0") < Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.0.0") < Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.2.0") < Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3"));
+        assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3-alpha2"));
+        assert!(!(Version::parse("1.2.3-alpha2") < Version::parse("1.2.3-alpha2")));
+        assert!(!(Version::parse("1.2.3+23") < Version::parse("1.2.3+42")));
+    }
+
+    #[test]
+    fn test_le() {
+        assert!(Version::parse("0.0.0") <= Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.0.0") <= Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.2.0") <= Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.2.3-alpha1") <= Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.2.3-alpha2") <= Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.2.3+23") <= Version::parse("1.2.3+42"));
+    }
+
+    #[test]
+    fn test_gt() {
+        assert!(Version::parse("1.2.3-alpha2") > Version::parse("0.0.0"));
+        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.0.0"));
+        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0"));
+        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha1"));
+        assert!(Version::parse("1.2.3") > Version::parse("1.2.3-alpha2"));
+        assert!(!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha2")));
+        assert!(!(Version::parse("1.2.3+23") > Version::parse("1.2.3+42")));
+    }
+
+    #[test]
+    fn test_ge() {
+        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("0.0.0"));
+        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.0.0"));
+        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.0"));
+        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha1"));
+        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha2"));
+        assert!(Version::parse("1.2.3+23") >= Version::parse("1.2.3+42"));
+    }
+
+    #[test]
+    fn test_prerelease_check() {
+        assert!(Version::parse("1.0.0").unwrap().is_prerelease() == false);
+        assert!(Version::parse("0.0.1").unwrap().is_prerelease() == false);
+        assert!(Version::parse("4.1.4-alpha").unwrap().is_prerelease());
+        assert!(Version::parse("1.0.0-beta294296").unwrap().is_prerelease());
+    }
+
+    #[test]
+    fn test_spec_order() {
+        let vs = ["1.0.0-alpha",
+                  "1.0.0-alpha.1",
+                  "1.0.0-alpha.beta",
+                  "1.0.0-beta",
+                  "1.0.0-beta.2",
+                  "1.0.0-beta.11",
+                  "1.0.0-rc.1",
+                  "1.0.0"];
+        let mut i = 1;
+        while i < vs.len() {
+            let a = Version::parse(vs[i - 1]);
+            let b = Version::parse(vs[i]);
+            assert!(a < b, "nope {:?} < {:?}", a, b);
+            i += 1;
+        }
+    }
+
+    #[test]
+    fn test_from_str() {
+        assert_eq!("1.2.3".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: Vec::new(),
+                       build: Vec::new(),
+                   }));
+        assert_eq!("  1.2.3  ".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: Vec::new(),
+                       build: Vec::new(),
+                   }));
+        assert_eq!("1.2.3-alpha1".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))],
+                       build: Vec::new(),
+                   }));
+        assert_eq!("  1.2.3-alpha1  ".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))],
+                       build: Vec::new(),
+                   }));
+        assert_eq!("1.2.3+build5".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: Vec::new(),
+                       build: vec![Identifier::AlphaNumeric(String::from("build5"))],
+                   }));
+        assert_eq!("  1.2.3+build5  ".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: Vec::new(),
+                       build: vec![Identifier::AlphaNumeric(String::from("build5"))],
+                   }));
+        assert_eq!("1.2.3-alpha1+build5".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))],
+                       build: vec![Identifier::AlphaNumeric(String::from("build5"))],
+                   }));
+        assert_eq!("  1.2.3-alpha1+build5  ".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))],
+                       build: vec![Identifier::AlphaNumeric(String::from("build5"))],
+                   }));
+        assert_eq!("1.2.3-1.alpha1.9+build5.7.3aedf  ".parse(),
+                   Ok(Version {
+                       major: 1,
+                       minor: 2,
+                       patch: 3,
+                       pre: vec![Identifier::Numeric(1),
+                                 Identifier::AlphaNumeric(String::from("alpha1")),
+                                 Identifier::Numeric(9)],
+                       build: vec![Identifier::AlphaNumeric(String::from("build5")),
+                                   Identifier::Numeric(7),
+                                   Identifier::AlphaNumeric(String::from("3aedf"))],
+                   }));
+        assert_eq!("0.4.0-beta.1+0851523".parse(),
+                   Ok(Version {
+                       major: 0,
+                       minor: 4,
+                       patch: 0,
+                       pre: vec![Identifier::AlphaNumeric(String::from("beta")),
+                      Identifier::Numeric(1),
+            ],
+                       build: vec![Identifier::AlphaNumeric(String::from("0851523"))],
+                   }));
+
+    }
+
+    #[test]
+    fn test_from_str_errors() {
+        fn parse_error(e: &str) -> result::Result<Version, SemVerError> {
+            return Err(SemVerError::ParseError(e.to_string()));
+        }
+
+        assert_eq!("".parse(), parse_error("Error parsing major identifier"));
+        assert_eq!("  ".parse(), parse_error("Error parsing major identifier"));
+        assert_eq!("1".parse(), parse_error("Expected dot"));
+        assert_eq!("1.2".parse(),
+                   parse_error("Expected dot"));
+        assert_eq!("1.2.3-".parse(),
+                   parse_error("Error parsing prerelease"));
+        assert_eq!("a.b.c".parse(),
+                   parse_error("Error parsing major identifier"));
+        assert_eq!("1.2.3 abc".parse(),
+                   parse_error("Extra junk after valid version:  abc"));
+    }
+}
diff --git a/rustc_deps/vendor/semver/src/version_req.rs b/rustc_deps/vendor/semver/src/version_req.rs
new file mode 100644
index 0000000..6e6a542
--- /dev/null
+++ b/rustc_deps/vendor/semver/src/version_req.rs
@@ -0,0 +1,895 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::error::Error;
+use std::fmt;
+use std::result;
+use std::str;
+
+use Version;
+use version::Identifier;
+use semver_parser;
+
+#[cfg(feature = "serde")]
+use serde::ser::{Serialize, Serializer};
+#[cfg(feature = "serde")]
+use serde::de::{self, Deserialize, Deserializer, Visitor};
+
+use self::Op::{Ex, Gt, GtEq, Lt, LtEq, Tilde, Compatible, Wildcard};
+use self::WildcardVersion::{Major, Minor, Patch};
+use self::ReqParseError::*;
+
+/// A `VersionReq` is a struct containing a list of predicates that can apply to ranges of version
+/// numbers. Matching operations can then be done with the `VersionReq` against a particular
+/// version to see if it satisfies some or all of the constraints.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub struct VersionReq {
+    predicates: Vec<Predicate>,
+}
+
+impl From<semver_parser::range::VersionReq> for VersionReq {
+    fn from(other: semver_parser::range::VersionReq) -> VersionReq {
+        VersionReq { predicates: other.predicates.into_iter().map(From::from).collect() }
+    }
+}
+
+#[cfg(feature = "serde")]
+impl Serialize for VersionReq {
+    fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
+        where S: Serializer
+    {
+        // Serialize VersionReq as a string.
+        serializer.collect_str(self)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> Deserialize<'de> for VersionReq {
+    fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
+        where D: Deserializer<'de>
+    {
+        struct VersionReqVisitor;
+
+        /// Deserialize `VersionReq` from a string.
+        impl<'de> Visitor<'de> for VersionReqVisitor {
+            type Value = VersionReq;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("a SemVer version requirement as a string")
+            }
+
+            fn visit_str<E>(self, v: &str) -> result::Result<Self::Value, E>
+                where E: de::Error
+            {
+                VersionReq::parse(v).map_err(de::Error::custom)
+            }
+        }
+
+        deserializer.deserialize_str(VersionReqVisitor)
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+enum WildcardVersion {
+    Major,
+    Minor,
+    Patch,
+}
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+enum Op {
+    Ex, // Exact
+    Gt, // Greater than
+    GtEq, // Greater than or equal to
+    Lt, // Less than
+    LtEq, // Less than or equal to
+    Tilde, // e.g. ~1.0.0
+    Compatible, // compatible by definition of semver, indicated by ^
+    Wildcard(WildcardVersion), // x.y.*, x.*, *
+}
+
+impl From<semver_parser::range::Op> for Op {
+    fn from(other: semver_parser::range::Op) -> Op {
+        use semver_parser::range;
+        match other {
+            range::Op::Ex => Op::Ex,
+            range::Op::Gt => Op::Gt,
+            range::Op::GtEq => Op::GtEq,
+            range::Op::Lt => Op::Lt,
+            range::Op::LtEq => Op::LtEq,
+            range::Op::Tilde => Op::Tilde,
+            range::Op::Compatible => Op::Compatible,
+            range::Op::Wildcard(version) => {
+                match version {
+                    range::WildcardVersion::Major => Op::Wildcard(WildcardVersion::Major),
+                    range::WildcardVersion::Minor => Op::Wildcard(WildcardVersion::Minor),
+                    range::WildcardVersion::Patch => Op::Wildcard(WildcardVersion::Patch),
+                }
+            }
+        }
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+struct Predicate {
+    op: Op,
+    major: u64,
+    minor: Option<u64>,
+    patch: Option<u64>,
+    pre: Vec<Identifier>,
+}
+
+impl From<semver_parser::range::Predicate> for Predicate {
+    fn from(other: semver_parser::range::Predicate) -> Predicate {
+        Predicate {
+            op: From::from(other.op),
+            major: other.major,
+            minor: other.minor,
+            patch: other.patch,
+            pre: other.pre.into_iter().map(From::from).collect(),
+        }
+    }
+}
+
+/// A `ReqParseError` is returned from methods which parse a string into a `VersionReq`. Each
+/// enumeration is one of the possible errors that can occur.
+#[derive(Clone, Debug, PartialEq)]
+pub enum ReqParseError {
+    /// The given version requirement is invalid.
+    InvalidVersionRequirement,
+    /// You have already provided an operation, such as `=`, `~`, or `^`. Only use one.
+    OpAlreadySet,
+    /// The sigil you have written is not correct.
+    InvalidSigil,
+    /// All components of a version must be numeric.
+    VersionComponentsMustBeNumeric,
+    /// There was an error parsing an identifier.
+    InvalidIdentifier,
+    /// At least a major version is required.
+    MajorVersionRequired,
+    /// An unimplemented version requirement.
+    UnimplementedVersionRequirement,
+    /// This form of requirement is deprecated.
+    DeprecatedVersionRequirement(VersionReq),
+}
+
+impl fmt::Display for ReqParseError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.description().fmt(f)
+    }
+}
+
+impl Error for ReqParseError {
+    fn description(&self) -> &str {
+        match self {
+            &InvalidVersionRequirement => "the given version requirement is invalid",
+            &OpAlreadySet => {
+                "you have already provided an operation, such as =, ~, or ^; only use one"
+            },
+            &InvalidSigil => "the sigil you have written is not correct",
+            &VersionComponentsMustBeNumeric => "version components must be numeric",
+            &InvalidIdentifier => "invalid identifier",
+            &MajorVersionRequired => "at least a major version number is required",
+            &UnimplementedVersionRequirement => {
+                "the given version requirement is not implemented yet"
+            },
+            &DeprecatedVersionRequirement(_) => "this requirement is deprecated",
+        }
+    }
+}
+
+impl From<String> for ReqParseError {
+    fn from(other: String) -> ReqParseError {
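+        // Map known semver_parser messages onto structured errors; anything
+        // unrecognized falls back to `InvalidVersionRequirement`.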
+        match &*other {
+            "Null is not a valid VersionReq" => ReqParseError::InvalidVersionRequirement,
+            "VersionReq did not parse properly." => ReqParseError::OpAlreadySet,
+            _ => ReqParseError::InvalidVersionRequirement,
+        }
+    }
+}
+
+impl VersionReq {
+    /// `any()` is a factory method which creates a `VersionReq` with no constraints. In other
+    /// words, any version will match against it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use semver::VersionReq;
+    ///
+    /// let anything = VersionReq::any();
+    /// ```
+    pub fn any() -> VersionReq {
+        VersionReq { predicates: vec![] }
+    }
+
+    /// `parse()` is the main constructor of a `VersionReq`. It takes a string like `"^1.2.3"`
+    /// and turns it into a `VersionReq` that matches that particular constraint.
+    ///
+    /// A `Result` is returned which contains a `ReqParseError` if there was a problem parsing the
+    /// `VersionReq`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use semver::VersionReq;
+    ///
+    /// let version = VersionReq::parse("=1.2.3");
+    /// let version = VersionReq::parse(">1.2.3");
+    /// let version = VersionReq::parse("<1.2.3");
+    /// let version = VersionReq::parse("~1.2.3");
+    /// let version = VersionReq::parse("^1.2.3");
+    /// let version = VersionReq::parse("1.2.3"); // synonym for ^1.2.3
+    /// let version = VersionReq::parse("<=1.2.3");
+    /// let version = VersionReq::parse(">=1.2.3");
+    /// ```
+    ///
+    /// This example demonstrates error handling, and will panic.
+    ///
+    /// ```should_panic
+    /// use semver::VersionReq;
+    ///
+    /// let version = match VersionReq::parse("not a version") {
+    ///     Ok(version) => version,
+    ///     Err(e) => panic!("There was a problem parsing: {}", e),
+    /// };
+    /// ```
+    pub fn parse(input: &str) -> Result<VersionReq, ReqParseError> {
+        let res = semver_parser::range::parse(input);
+
+        if let Ok(v) = res {
+            return Ok(From::from(v));
+        }
+
+        return match VersionReq::parse_deprecated(input) {
+            Some(v) => {
+                Err(ReqParseError::DeprecatedVersionRequirement(v))
+            }
+            None => Err(From::from(res.err().unwrap())),
+        }
+    }
+
+    fn parse_deprecated(version: &str) -> Option<VersionReq> {
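+        // A handful of malformed requirement strings that were accepted
+        // historically; map each to the requirement it presumably meant so
+        // that `parse` can report `DeprecatedVersionRequirement` instead of
+        // a plain parse error.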
+        return match version {
+            ".*" => Some(VersionReq::any()),
+            "0.1.0." => Some(VersionReq::parse("0.1.0").unwrap()),
+            "0.3.1.3" => Some(VersionReq::parse("0.3.13").unwrap()),
+            "0.2*" => Some(VersionReq::parse("0.2.*").unwrap()),
+            "*.0" => Some(VersionReq::any()),
+            _ => None,
+        }
+    }
+
+    /// `exact()` is a factory method which creates a `VersionReq` with one exact constraint.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use semver::VersionReq;
+    /// use semver::Version;
+    ///
+    /// let version = Version { major: 1, minor: 1, patch: 1, pre: vec![], build: vec![] };
+    /// let exact = VersionReq::exact(&version);
+    /// ```
+    pub fn exact(version: &Version) -> VersionReq {
+        VersionReq { predicates: vec![Predicate::exact(version)] }
+    }
+
+    /// `matches()` matches a given `Version` against this `VersionReq`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use semver::VersionReq;
+    /// use semver::Version;
+    ///
+    /// let version = Version { major: 1, minor: 1, patch: 1, pre: vec![], build: vec![] };
+    /// let exact = VersionReq::exact(&version);
+    ///
+    /// assert!(exact.matches(&version));
+    /// ```
+    pub fn matches(&self, version: &Version) -> bool {
+        // no predicates means anything matches
+        if self.predicates.is_empty() {
+            return true;
+        }
+
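+        // Otherwise every predicate must match, and at least one predicate
+        // must opt in to the version's prerelease tag (see
+        // `pre_tag_is_compatible` below).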
+        self.predicates.iter().all(|p| p.matches(version)) &&
+        self.predicates.iter().any(|p| p.pre_tag_is_compatible(version))
+    }
+}
+
+impl str::FromStr for VersionReq {
+    type Err = ReqParseError;
+
+    fn from_str(s: &str) -> Result<VersionReq, ReqParseError> {
+        VersionReq::parse(s)
+    }
+}
+
+impl Predicate {
+    fn exact(version: &Version) -> Predicate {
+        Predicate {
+            op: Ex,
+            major: version.major,
+            minor: Some(version.minor),
+            patch: Some(version.patch),
+            pre: version.pre.clone(),
+        }
+    }
+
+    /// `matches()` takes a `Version` and determines if it matches this particular `Predicate`.
+    pub fn matches(&self, ver: &Version) -> bool {
+        match self.op {
+            Ex => self.is_exact(ver),
+            Gt => self.is_greater(ver),
+            GtEq => self.is_exact(ver) || self.is_greater(ver),
+            Lt => !self.is_exact(ver) && !self.is_greater(ver),
+            LtEq => !self.is_greater(ver),
+            Tilde => self.matches_tilde(ver),
+            Compatible => self.is_compatible(ver),
+            Wildcard(_) => self.matches_wildcard(ver),
+        }
+    }
+
+    fn is_exact(&self, ver: &Version) -> bool {
+        if self.major != ver.major {
+            return false;
+        }
+
+        match self.minor {
+            Some(minor) => {
+                if minor != ver.minor {
+                    return false;
+                }
+            }
+            None => return true,
+        }
+
+        match self.patch {
+            Some(patch) => {
+                if patch != ver.patch {
+                    return false;
+                }
+            }
+            None => return true,
+        }
+
+        if self.pre != ver.pre {
+            return false;
+        }
+
+        true
+    }
+
+    // https://docs.npmjs.com/misc/semver#prerelease-tags
+    fn pre_tag_is_compatible(&self, ver: &Version) -> bool {
+        // If a version has a prerelease tag (for example, 1.2.3-alpha.3)
+        // then it will only be allowed to satisfy comparator sets if at
+        // least one comparator with the same [major, minor, patch] tuple
+        // also has a prerelease tag.
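+        // For example, `^1.1.2` does not match `1.1.3-alpha1` (no comparator
+        // with the [1, 1, 3] tuple has a prerelease tag), while
+        // `~1.2.3-beta.2` does match `1.2.3-beta.4`.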
+        !ver.is_prerelease() ||
+        (self.major == ver.major && self.minor == Some(ver.minor) &&
+         self.patch == Some(ver.patch) && !self.pre.is_empty())
+    }
+
+    fn is_greater(&self, ver: &Version) -> bool {
+        if self.major != ver.major {
+            return ver.major > self.major;
+        }
+
+        match self.minor {
+            Some(minor) => {
+                if minor != ver.minor {
+                    return ver.minor > minor;
+                }
+            }
+            None => return false,
+        }
+
+        match self.patch {
+            Some(patch) => {
+                if patch != ver.patch {
+                    return ver.patch > patch;
+                }
+            }
+            None => return false,
+        }
+
+        if !self.pre.is_empty() {
+            return ver.pre.is_empty() || ver.pre > self.pre;
+        }
+
+        false
+    }
+
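+    // Tilde requirements freeze the major component (and the minor
+    // component, when given) and let the rest float:
+    //   ~1.2.3 := >=1.2.3 <1.3.0
+    //   ~1.2   := >=1.2.0 <1.3.0
+    //   ~1     := >=1.0.0 <2.0.0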
+    // see https://www.npmjs.org/doc/misc/semver.html for behavior
+    fn matches_tilde(&self, ver: &Version) -> bool {
+        let minor = match self.minor {
+            Some(n) => n,
+            None => return self.major == ver.major,
+        };
+
+        match self.patch {
+            Some(patch) => {
+                self.major == ver.major && minor == ver.minor &&
+                (ver.patch > patch || (ver.patch == patch && self.pre_is_compatible(ver)))
+            }
+            None => self.major == ver.major && minor == ver.minor,
+        }
+    }
+
+    // see https://www.npmjs.org/doc/misc/semver.html for behavior
+    fn is_compatible(&self, ver: &Version) -> bool {
+        if self.major != ver.major {
+            return false;
+        }
+
+        let minor = match self.minor {
+            Some(n) => n,
+            None => return self.major == ver.major,
+        };
+
+        match self.patch {
+            Some(patch) => {
+                if self.major == 0 {
+                    if minor == 0 {
+                        ver.minor == minor && ver.patch == patch && self.pre_is_compatible(ver)
+                    } else {
+                        ver.minor == minor &&
+                        (ver.patch > patch || (ver.patch == patch && self.pre_is_compatible(ver)))
+                    }
+                } else {
+                    ver.minor > minor ||
+                    (ver.minor == minor &&
+                     (ver.patch > patch || (ver.patch == patch && self.pre_is_compatible(ver))))
+                }
+            }
+            None => {
+                if self.major == 0 {
+                    ver.minor == minor
+                } else {
+                    ver.minor >= minor
+                }
+            }
+        }
+    }
+
+    fn pre_is_compatible(&self, ver: &Version) -> bool {
+        ver.pre.is_empty() || ver.pre >= self.pre
+    }
+
+    // see https://www.npmjs.org/doc/misc/semver.html for behavior
+    fn matches_wildcard(&self, ver: &Version) -> bool {
+        match self.op {
+            Wildcard(Major) => true,
+            Wildcard(Minor) => self.major == ver.major,
+            Wildcard(Patch) => {
+                match self.minor {
+                    Some(minor) => self.major == ver.major && minor == ver.minor,
+                    None => {
+                        // minor and patch version asterisks mean match on major
+                        self.major == ver.major
+                    }
+                }
+            }
+            _ => false,  // unreachable
+        }
+    }
+}
+
+impl fmt::Display for VersionReq {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        if self.predicates.is_empty() {
+            try!(write!(fmt, "*"));
+        } else {
+            for (i, ref pred) in self.predicates.iter().enumerate() {
+                if i == 0 {
+                    try!(write!(fmt, "{}", pred));
+                } else {
+                    try!(write!(fmt, ", {}", pred));
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl fmt::Display for Predicate {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        match self.op {
+            Wildcard(Major) => try!(write!(fmt, "*")),
+            Wildcard(Minor) => try!(write!(fmt, "{}.*", self.major)),
+            Wildcard(Patch) => {
+                if let Some(minor) = self.minor {
+                    try!(write!(fmt, "{}.{}.*", self.major, minor))
+                } else {
+                    try!(write!(fmt, "{}.*.*", self.major))
+                }
+            }
+            _ => {
+                try!(write!(fmt, "{}{}", self.op, self.major));
+
+                match self.minor {
+                    Some(v) => try!(write!(fmt, ".{}", v)),
+                    None => (),
+                }
+
+                match self.patch {
+                    Some(v) => try!(write!(fmt, ".{}", v)),
+                    None => (),
+                }
+
+                if !self.pre.is_empty() {
+                    try!(write!(fmt, "-"));
+                    for (i, x) in self.pre.iter().enumerate() {
+                        if i != 0 {
+                            try!(write!(fmt, "."))
+                        }
+                        try!(write!(fmt, "{}", x));
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl fmt::Display for Op {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Ex => try!(write!(fmt, "= ")),
+            Gt => try!(write!(fmt, "> ")),
+            GtEq => try!(write!(fmt, ">= ")),
+            Lt => try!(write!(fmt, "< ")),
+            LtEq => try!(write!(fmt, "<= ")),
+            Tilde => try!(write!(fmt, "~")),
+            Compatible => try!(write!(fmt, "^")),
+            // gets handled specially in Predicate::fmt
+            Wildcard(_) => try!(write!(fmt, "")),
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::{VersionReq, Op};
+    use super::super::version::Version;
+    use std::hash::{Hash, Hasher};
+
+    fn req(s: &str) -> VersionReq {
+        VersionReq::parse(s).unwrap()
+    }
+
+    fn version(s: &str) -> Version {
+        match Version::parse(s) {
+            Ok(v) => v,
+            Err(e) => panic!("`{}` is not a valid version. Reason: {:?}", s, e),
+        }
+    }
+
+    fn assert_match(req: &VersionReq, vers: &[&str]) {
+        for ver in vers.iter() {
+            assert!(req.matches(&version(*ver)), "did not match {}", ver);
+        }
+    }
+
+    fn assert_not_match(req: &VersionReq, vers: &[&str]) {
+        for ver in vers.iter() {
+            assert!(!req.matches(&version(*ver)), "matched {}", ver);
+        }
+    }
+
+    fn calculate_hash<T: Hash>(t: T) -> u64 {
+        use std::collections::hash_map::DefaultHasher;
+
+        let mut s = DefaultHasher::new();
+        t.hash(&mut s);
+        s.finish()
+    }
+
+    #[test]
+    fn test_parsing_default() {
+        let r = req("1.0.0");
+
+        assert_eq!(r.to_string(), "^1.0.0".to_string());
+
+        assert_match(&r, &["1.0.0", "1.0.1"]);
+        assert_not_match(&r, &["0.9.9", "0.10.0", "0.1.0"]);
+    }
+
+    #[test]
+    fn test_parsing_exact() {
+        let r = req("=1.0.0");
+
+        assert!(r.to_string() == "= 1.0.0".to_string());
+        assert_eq!(r.to_string(), "= 1.0.0".to_string());
+
+        assert_match(&r, &["1.0.0"]);
+        assert_not_match(&r, &["1.0.1", "0.9.9", "0.10.0", "0.1.0", "1.0.0-pre"]);
+
+        let r = req("=0.9.0");
+
+        assert_eq!(r.to_string(), "= 0.9.0".to_string());
+
+        assert_match(&r, &["0.9.0"]);
+        assert_not_match(&r, &["0.9.1", "1.9.0", "0.0.9"]);
+
+        let r = req("=0.1.0-beta2.a");
+
+        assert_eq!(r.to_string(), "= 0.1.0-beta2.a".to_string());
+
+        assert_match(&r, &["0.1.0-beta2.a"]);
+        assert_not_match(&r, &["0.9.1", "0.1.0", "0.1.1-beta2.a", "0.1.0-beta2"]);
+    }
+
+    #[test]
+    fn test_parse_metadata_see_issue_88_see_issue_88() {
+        for op in &[Op::Compatible, Op::Ex, Op::Gt, Op::GtEq, Op::Lt, Op::LtEq, Op::Tilde] {
+            req(&format!("{} 1.2.3+meta", op));
+        }
+    }
+
+    #[test]
+    pub fn test_parsing_greater_than() {
+        let r = req(">= 1.0.0");
+
+        assert_eq!(r.to_string(), ">= 1.0.0".to_string());
+
+        assert_match(&r, &["1.0.0", "2.0.0"]);
+        assert_not_match(&r, &["0.1.0", "0.0.1", "1.0.0-pre", "2.0.0-pre"]);
+
+        let r = req(">= 2.1.0-alpha2");
+
+        assert_match(&r, &["2.1.0-alpha2", "2.1.0-alpha3", "2.1.0", "3.0.0"]);
+        assert_not_match(&r,
+                         &["2.0.0", "2.1.0-alpha1", "2.0.0-alpha2", "3.0.0-alpha2"]);
+    }
+
+    #[test]
+    pub fn test_parsing_less_than() {
+        let r = req("< 1.0.0");
+
+        assert_eq!(r.to_string(), "< 1.0.0".to_string());
+
+        assert_match(&r, &["0.1.0", "0.0.1"]);
+        assert_not_match(&r, &["1.0.0", "1.0.0-beta", "1.0.1", "0.9.9-alpha"]);
+
+        let r = req("<= 2.1.0-alpha2");
+
+        assert_match(&r, &["2.1.0-alpha2", "2.1.0-alpha1", "2.0.0", "1.0.0"]);
+        assert_not_match(&r,
+                         &["2.1.0", "2.2.0-alpha1", "2.0.0-alpha2", "1.0.0-alpha2"]);
+    }
+
+    #[test]
+    pub fn test_multiple() {
+        let r = req("> 0.0.9, <= 2.5.3");
+        assert_eq!(r.to_string(), "> 0.0.9, <= 2.5.3".to_string());
+        assert_match(&r, &["0.0.10", "1.0.0", "2.5.3"]);
+        assert_not_match(&r, &["0.0.8", "2.5.4"]);
+
+        let r = req("0.3.0, 0.4.0");
+        assert_eq!(r.to_string(), "^0.3.0, ^0.4.0".to_string());
+        assert_not_match(&r, &["0.0.8", "0.3.0", "0.4.0"]);
+
+        let r = req("<= 0.2.0, >= 0.5.0");
+        assert_eq!(r.to_string(), "<= 0.2.0, >= 0.5.0".to_string());
+        assert_not_match(&r, &["0.0.8", "0.3.0", "0.5.1"]);
+
+        let r = req("0.1.0, 0.1.4, 0.1.6");
+        assert_eq!(r.to_string(), "^0.1.0, ^0.1.4, ^0.1.6".to_string());
+        assert_match(&r, &["0.1.6", "0.1.9"]);
+        assert_not_match(&r, &["0.1.0", "0.1.4", "0.2.0"]);
+
+        assert!(VersionReq::parse("> 0.1.0,").is_err());
+        assert!(VersionReq::parse("> 0.3.0, ,").is_err());
+
+        let r = req(">=0.5.1-alpha3, <0.6");
+        assert_eq!(r.to_string(), ">= 0.5.1-alpha3, < 0.6".to_string());
+        assert_match(&r,
+                     &["0.5.1-alpha3", "0.5.1-alpha4", "0.5.1-beta", "0.5.1", "0.5.5"]);
+        assert_not_match(&r,
+                         &["0.5.1-alpha1", "0.5.2-alpha3", "0.5.5-pre", "0.5.0-pre"]);
+        assert_not_match(&r, &["0.6.0", "0.6.0-pre"]);
+    }
+
+    #[test]
+    pub fn test_parsing_tilde() {
+        let r = req("~1");
+        assert_match(&r, &["1.0.0", "1.0.1", "1.1.1"]);
+        assert_not_match(&r, &["0.9.1", "2.9.0", "0.0.9"]);
+
+        let r = req("~1.2");
+        assert_match(&r, &["1.2.0", "1.2.1"]);
+        assert_not_match(&r, &["1.1.1", "1.3.0", "0.0.9"]);
+
+        let r = req("~1.2.2");
+        assert_match(&r, &["1.2.2", "1.2.4"]);
+        assert_not_match(&r, &["1.2.1", "1.9.0", "1.0.9", "2.0.1", "0.1.3"]);
+
+        let r = req("~1.2.3-beta.2");
+        assert_match(&r, &["1.2.3", "1.2.4", "1.2.3-beta.2", "1.2.3-beta.4"]);
+        assert_not_match(&r, &["1.3.3", "1.1.4", "1.2.3-beta.1", "1.2.4-beta.2"]);
+    }
+
+    #[test]
+    pub fn test_parsing_compatible() {
+        let r = req("^1");
+        assert_match(&r, &["1.1.2", "1.1.0", "1.2.1", "1.0.1"]);
+        assert_not_match(&r, &["0.9.1", "2.9.0", "0.1.4"]);
+        assert_not_match(&r, &["1.0.0-beta1", "0.1.0-alpha", "1.0.1-pre"]);
+
+        let r = req("^1.1");
+        assert_match(&r, &["1.1.2", "1.1.0", "1.2.1"]);
+        assert_not_match(&r, &["0.9.1", "2.9.0", "1.0.1", "0.1.4"]);
+
+        let r = req("^1.1.2");
+        assert_match(&r, &["1.1.2", "1.1.4", "1.2.1"]);
+        assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]);
+        assert_not_match(&r, &["1.1.2-alpha1", "1.1.3-alpha1", "2.9.0-alpha1"]);
+
+        let r = req("^0.1.2");
+        assert_match(&r, &["0.1.2", "0.1.4"]);
+        assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]);
+        assert_not_match(&r, &["0.1.2-beta", "0.1.3-alpha", "0.2.0-pre"]);
+
+        let r = req("^0.5.1-alpha3");
+        assert_match(&r,
+                     &["0.5.1-alpha3", "0.5.1-alpha4", "0.5.1-beta", "0.5.1", "0.5.5"]);
+        assert_not_match(&r,
+                         &["0.5.1-alpha1", "0.5.2-alpha3", "0.5.5-pre", "0.5.0-pre", "0.6.0"]);
+
+        let r = req("^0.0.2");
+        assert_match(&r, &["0.0.2"]);
+        assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1", "0.1.4"]);
+
+        let r = req("^0.0");
+        assert_match(&r, &["0.0.2", "0.0.0"]);
+        assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.1.4"]);
+
+        let r = req("^0");
+        assert_match(&r, &["0.9.1", "0.0.2", "0.0.0"]);
+        assert_not_match(&r, &["2.9.0", "1.1.1"]);
+
+        let r = req("^1.4.2-beta.5");
+        assert_match(&r,
+                     &["1.4.2", "1.4.3", "1.4.2-beta.5", "1.4.2-beta.6", "1.4.2-c"]);
+        assert_not_match(&r,
+                         &["0.9.9", "2.0.0", "1.4.2-alpha", "1.4.2-beta.4", "1.4.3-beta.5"]);
+    }
+
+    #[test]
+    pub fn test_parsing_wildcard() {
+        let r = req("");
+        assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]);
+        assert_not_match(&r, &[]);
+        let r = req("*");
+        assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]);
+        assert_not_match(&r, &[]);
+        let r = req("x");
+        assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]);
+        assert_not_match(&r, &[]);
+        let r = req("X");
+        assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]);
+        assert_not_match(&r, &[]);
+
+        let r = req("1.*");
+        assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]);
+        assert_not_match(&r, &["0.0.9"]);
+        let r = req("1.x");
+        assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]);
+        assert_not_match(&r, &["0.0.9"]);
+        let r = req("1.X");
+        assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]);
+        assert_not_match(&r, &["0.0.9"]);
+
+        let r = req("1.2.*");
+        assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]);
+        assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]);
+        let r = req("1.2.x");
+        assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]);
+        assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]);
+        let r = req("1.2.X");
+        assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]);
+        assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]);
+    }
+
+    #[test]
+    pub fn test_any() {
+        let r = VersionReq::any();
+        assert_match(&r, &["0.0.1", "0.1.0", "1.0.0"]);
+    }
+
+    #[test]
+    pub fn test_pre() {
+        let r = req("=2.1.1-really.0");
+        assert_match(&r, &["2.1.1-really.0"]);
+    }
+
+    // #[test]
+    // pub fn test_parse_errors() {
+    //    assert_eq!(Err(InvalidVersionRequirement), VersionReq::parse("\0"));
+    //    assert_eq!(Err(OpAlreadySet), VersionReq::parse(">= >= 0.0.2"));
+    //    assert_eq!(Err(InvalidSigil), VersionReq::parse(">== 0.0.2"));
+    //    assert_eq!(Err(VersionComponentsMustBeNumeric),
+    //               VersionReq::parse("a.0.0"));
+    //    assert_eq!(Err(InvalidIdentifier), VersionReq::parse("1.0.0-"));
+    //    assert_eq!(Err(MajorVersionRequired), VersionReq::parse(">="));
+    // }
+
+    #[test]
+    pub fn test_from_str() {
+        assert_eq!("1.0.0".parse::<VersionReq>().unwrap().to_string(),
+                   "^1.0.0".to_string());
+        assert_eq!("=1.0.0".parse::<VersionReq>().unwrap().to_string(),
+                   "= 1.0.0".to_string());
+        assert_eq!("~1".parse::<VersionReq>().unwrap().to_string(),
+                   "~1".to_string());
+        assert_eq!("~1.2".parse::<VersionReq>().unwrap().to_string(),
+                   "~1.2".to_string());
+        assert_eq!("^1".parse::<VersionReq>().unwrap().to_string(),
+                   "^1".to_string());
+        assert_eq!("^1.1".parse::<VersionReq>().unwrap().to_string(),
+                   "^1.1".to_string());
+        assert_eq!("*".parse::<VersionReq>().unwrap().to_string(),
+                   "*".to_string());
+        assert_eq!("1.*".parse::<VersionReq>().unwrap().to_string(),
+                   "1.*".to_string());
+        assert_eq!("< 1.0.0".parse::<VersionReq>().unwrap().to_string(),
+                   "< 1.0.0".to_string());
+    }
+
+    // #[test]
+    // pub fn test_from_str_errors() {
+    //    assert_eq!(Err(InvalidVersionRequirement), "\0".parse::<VersionReq>());
+    //    assert_eq!(Err(OpAlreadySet), ">= >= 0.0.2".parse::<VersionReq>());
+    //    assert_eq!(Err(InvalidSigil), ">== 0.0.2".parse::<VersionReq>());
+    //    assert_eq!(Err(VersionComponentsMustBeNumeric),
+    //               "a.0.0".parse::<VersionReq>());
+    //    assert_eq!(Err(InvalidIdentifier), "1.0.0-".parse::<VersionReq>());
+    //    assert_eq!(Err(MajorVersionRequired), ">=".parse::<VersionReq>());
+    // }
+
+    #[test]
+    fn test_cargo3202() {
+        let v = "0.*.*".parse::<VersionReq>().unwrap();
+        assert_eq!("0.*.*", format!("{}", v.predicates[0]));
+
+        let v = "0.0.*".parse::<VersionReq>().unwrap();
+        assert_eq!("0.0.*", format!("{}", v.predicates[0]));
+
+        let r = req("0.*.*");
+        assert_match(&r, &["0.5.0"]);
+    }
+
+    #[test]
+    fn test_eq_hash() {
+        assert!(req("^1") == req("^1"));
+        assert!(calculate_hash(req("^1")) == calculate_hash(req("^1")));
+        assert!(req("^1") != req("^2"));
+    }
+
+    #[test]
+    fn test_ordering() {
+        assert!(req("=1") < req("*"));
+        assert!(req(">1") < req("*"));
+        assert!(req(">=1") < req("*"));
+        assert!(req("<1") < req("*"));
+        assert!(req("<=1") < req("*"));
+        assert!(req("~1") < req("*"));
+        assert!(req("^1") < req("*"));
+        assert!(req("*") == req("*"));
+    }
+}
diff --git a/rustc_deps/vendor/semver/tests/deprecation.rs b/rustc_deps/vendor/semver/tests/deprecation.rs
new file mode 100644
index 0000000..a5f533a
--- /dev/null
+++ b/rustc_deps/vendor/semver/tests/deprecation.rs
@@ -0,0 +1,22 @@
+extern crate semver;
+
+#[test]
+fn test_regressions() {
+    use semver::VersionReq;
+    use semver::ReqParseError;
+
+    let versions = vec![
+        (".*", VersionReq::any()),
+        ("0.1.0.", VersionReq::parse("0.1.0").unwrap()),
+        ("0.3.1.3", VersionReq::parse("0.3.13").unwrap()),
+        ("0.2*", VersionReq::parse("0.2.*").unwrap()),
+        ("*.0", VersionReq::any()),
+    ];
+
+    for (version, requirement) in versions.into_iter() {
+        let parsed = VersionReq::parse(version);
+        let error = parsed.err().unwrap();
+
+        assert_eq!(ReqParseError::DeprecatedVersionRequirement(requirement), error);
+    }
+}
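
`deprecation.rs` above documents the transition path: spellings the old parser tolerated (".*", "0.2*", trailing dots) now fail, but the error carries the requirement the deprecated form used to denote. A hedged sketch of handling that case, using only the `ReqParseError` variant exercised by the test:

    extern crate semver;

    use semver::{ReqParseError, VersionReq};

    fn main() {
        // "0.2*" is rejected, but the error reports the requirement
        // the deprecated spelling used to mean ("0.2.*").
        match VersionReq::parse("0.2*") {
            Err(ReqParseError::DeprecatedVersionRequirement(req)) => {
                assert_eq!(req, VersionReq::parse("0.2.*").unwrap());
            }
            other => panic!("expected deprecation error, got {:?}", other),
        }
    }
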
diff --git a/rustc_deps/vendor/semver/tests/regression.rs b/rustc_deps/vendor/semver/tests/regression.rs
new file mode 100644
index 0000000..ef568a7
--- /dev/null
+++ b/rustc_deps/vendor/semver/tests/regression.rs
@@ -0,0 +1,25 @@
+extern crate semver;
+extern crate crates_index;
+extern crate tempdir;
+
+// This test checks that every crate published to crates.io parses successfully. Important not to
+// break the Rust universe!
+
+#[cfg(feature = "ci")]
+#[test]
+fn test_regressions() {
+    use tempdir::TempDir;
+    use crates_index::Index;
+    use semver::Version;
+
+    let dir = TempDir::new("semver").unwrap();
+    let index = Index::new(dir.into_path());
+    index.clone().unwrap();
+
+    for krate in index.crates() {
+        for version in krate.versions() {
+            let v = version.version();
+            assert!(Version::parse(v).is_ok(), "failed: {} ({})", version.name(), v);
+        }
+    }
+}
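
Because `test_regressions` in `regression.rs` clones the entire crates.io index, it is gated behind `#[cfg(feature = "ci")]` and only compiles when that Cargo feature is enabled (presumably via `cargo test --features ci` from the crate root). A cheap offline approximation of the same invariant, over a hypothetical sample of version strings rather than the whole index:

    extern crate semver;

    use semver::Version;

    fn main() {
        // Spot-check that representative real-world version strings parse,
        // including pre-release and build-metadata forms.
        for v in &["1.0.0", "0.3.0-alpha.1", "2.1.1+build.5"] {
            assert!(Version::parse(v).is_ok(), "failed to parse: {}", v);
        }
    }
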
diff --git a/rustc_deps/vendor/semver/tests/serde.rs b/rustc_deps/vendor/semver/tests/serde.rs
new file mode 100644
index 0000000..bcb9264
--- /dev/null
+++ b/rustc_deps/vendor/semver/tests/serde.rs
@@ -0,0 +1,90 @@
+#![cfg(feature = "serde")]
+
+#[macro_use]
+extern crate serde_derive;
+
+extern crate semver;
+extern crate serde_json;
+
+use semver::{Identifier, Version, VersionReq};
+
+#[derive(Serialize, Deserialize, PartialEq, Debug)]
+struct Identified {
+    name: String,
+    identifier: Identifier,
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Debug)]
+struct Versioned {
+    name: String,
+    vers: Version,
+}
+
+#[test]
+fn serialize_identifier() {
+    let id = Identified {
+        name: "serde".to_owned(),
+        identifier: Identifier::Numeric(100),
+    };
+    let j = serde_json::to_string(&id).unwrap();
+    assert_eq!(j, r#"{"name":"serde","identifier":100}"#);
+
+    let id = Identified {
+        name: "serde".to_owned(),
+        identifier: Identifier::AlphaNumeric("b100".to_owned()),
+    };
+    let j = serde_json::to_string(&id).unwrap();
+    assert_eq!(j, r#"{"name":"serde","identifier":"b100"}"#);
+}
+
+#[test]
+fn deserialize_identifier() {
+    let j = r#"{"name":"serde","identifier":100}"#;
+    let id = serde_json::from_str::<Identified>(j).unwrap();
+    let expected = Identified {
+        name: "serde".to_owned(),
+        identifier: Identifier::Numeric(100),
+    };
+    assert_eq!(id, expected);
+
+    let j = r#"{"name":"serde","identifier":"b100"}"#;
+    let id = serde_json::from_str::<Identified>(j).unwrap();
+    let expected = Identified {
+        name: "serde".to_owned(),
+        identifier: Identifier::AlphaNumeric("b100".to_owned()),
+    };
+    assert_eq!(id, expected);
+}
+
+#[test]
+fn serialize_version() {
+    let v = Versioned {
+        name: "serde".to_owned(),
+        vers: Version::parse("1.0.0").unwrap(),
+    };
+    let j = serde_json::to_string(&v).unwrap();
+    assert_eq!(j, r#"{"name":"serde","vers":"1.0.0"}"#);
+}
+
+#[test]
+fn deserialize_version() {
+    let j = r#"{"name":"serde","vers":"1.0.0"}"#;
+    let v = serde_json::from_str::<Versioned>(j).unwrap();
+    let expected = Versioned {
+        name: "serde".to_owned(),
+        vers: Version::parse("1.0.0").unwrap(),
+    };
+    assert_eq!(v, expected);
+}
+
+#[test]
+fn serialize_versionreq() {
+    let v = VersionReq::exact(&Version::parse("1.0.0").unwrap());
+
+    assert_eq!(serde_json::to_string(&v).unwrap(), r#""= 1.0.0""#);
+}
+
+#[test]
+fn deserialize_versionreq() {
+    assert_eq!("1.0.0".parse::<VersionReq>().unwrap(), serde_json::from_str(r#""1.0.0""#).unwrap());
+}
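
The serde tests above fix the wire format: `Version` and `VersionReq` serialize as plain JSON strings rather than structured objects, so they embed cleanly in manifest- and lockfile-like formats. A minimal round-trip sketch under the crate's "serde" feature; serde_json is used only as a convenient transport:

    extern crate semver;
    extern crate serde_json;

    use semver::Version;

    fn main() {
        // With the "serde" feature enabled, a Version serializes
        // to a bare JSON string.
        let v = Version::parse("1.0.0").unwrap();
        let j = serde_json::to_string(&v).unwrap();
        assert_eq!(j, r#""1.0.0""#);

        // Deserializing the same string recovers an equal Version.
        let back: Version = serde_json::from_str(&j).unwrap();
        assert_eq!(back, v);
    }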