Add hyper and dependencies
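
Hyper 0.12.13 is added to rustc_deps/Cargo.toml with default features
disabled, and the new transitive dependencies it brings in (fnv,
futures 0.1, h2, http, indexmap, string, tokio-io, try-lock, want) are
added to Cargo.lock and vendored under rustc_deps/vendor/.

For reference, a minimal GET request against hyper 0.12's futures-0.1
API looks roughly like the sketch below. This is illustrative only: it
assumes hyper's default `runtime` feature (which this change disables)
and uses a made-up local address.

  // Hypothetical sketch, not part of this change.
  extern crate futures;
  extern crate hyper;

  use futures::{Future, Stream};

  fn main() {
      // Client::new() and hyper::rt::run() come from the (here disabled)
      // `runtime` feature; without it, callers supply their own executor.
      let client = hyper::Client::new();
      let uri: hyper::Uri = "http://127.0.0.1:8080/".parse().unwrap();
      let get = client
          .get(uri)
          .and_then(|resp| {
              println!("status: {}", resp.status());
              // The body is a Stream of Chunks; concat2() buffers it.
              resp.into_body().concat2()
          })
          .map(|body| println!("read {} bytes", body.len()))
          .map_err(|err| eprintln!("request error: {}", err));
      hyper::rt::run(get);
  }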

OSRB-77

Change-Id: Ib605732ff81c7cd670c21e4f756fe7ebb25b6c6f
diff --git a/rustc_deps/Cargo.lock b/rustc_deps/Cargo.lock
index 16dca5a..6297fef 100644
--- a/rustc_deps/Cargo.lock
+++ b/rustc_deps/Cargo.lock
@@ -361,6 +361,11 @@
 ]
 
 [[package]]
+name = "fnv"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "font-rs"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -388,6 +393,7 @@
  "futures-preview 0.3.0-alpha.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "getopts 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hyper 0.12.13 (registry+https://github.com/rust-lang/crates.io-index)",
  "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -438,6 +444,11 @@
 version = "0.3.3"
 
 [[package]]
+name = "futures"
+version = "0.1.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "futures-channel-preview"
 version = "0.3.0-alpha.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -542,16 +553,61 @@
 ]
 
 [[package]]
+name = "h2"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "http 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "string 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "hex"
 version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "http"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "httparse"
 version = "1.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "hyper"
+version = "0.12.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "h2 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "http 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "httparse 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "want 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "idna"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -562,6 +618,11 @@
 ]
 
 [[package]]
+name = "indexmap"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "iovec"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1096,6 +1157,11 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "string"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "strsim"
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1246,6 +1312,16 @@
 ]
 
 [[package]]
+name = "tokio-io"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "toml"
 version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1254,6 +1330,11 @@
 ]
 
 [[package]]
+name = "try-lock"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "twoway"
 version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1367,6 +1448,16 @@
 ]
 
 [[package]]
+name = "want"
+version = "0.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "winapi"
 version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1506,7 +1597,9 @@
 "checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
 "checksum filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "da4b9849e77b13195302c174324b5ba73eec9b236b24c221a61000daefb95c5f"
 "checksum flate2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3b0c7353385f92079524de3b7116cf99d73947c08a7472774e9b3b04bff3b901"
+"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
 "checksum font-rs 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "33939d997b0ef798cc92c9cc49a535c9b085273e121cc8e8281561be1fd76cd2"
+"checksum futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)" = "49e7653e374fe0d0c12de4250f0bdb60680b8c80eed558c5c7538eec9c89e21b"
 "checksum futures-channel-preview 0.3.0-alpha.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ab71b4d4948210b1ff4703e66cc9d931afd8ba35b5657cf369374a091cca96ed"
 "checksum futures-core-preview 0.3.0-alpha.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e5fea6d4920bf1745d87f8624abdd38ed03fb5b644767b84a0de882fe46c4153"
 "checksum futures-executor-preview 0.3.0-alpha.9 (registry+https://github.com/rust-lang/crates.io-index)" = "43d4bb2cc4c0cefc554b0cf12f7151e0a578cf1820a36453eff1d955dde762ff"
@@ -1518,9 +1611,13 @@
 "checksum generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ef25c5683767570c2bbd7deba372926a55eaae9982d7726ee2a1050239d45b9d"
 "checksum getopts 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "0a7292d30132fb5424b354f5dc02512a86e4c516fe544bb7a25e7f266951b797"
 "checksum gzip-header 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9e59524a909fe98bb6c6f2cf1f27f2f6772887a496bf4c68cae0d94f884586"
+"checksum h2 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "7dd33bafe2e6370e6c8eb0cf1b8c5f93390b90acde7e9b03723f166b28b648ed"
 "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
+"checksum http 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "24f58e8c2d8e886055c3ead7b28793e1455270b5fb39650984c224bc538ba581"
 "checksum httparse 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e8734b0cfd3bc3e101ec59100e101c2eecd19282202e87808b3037b442777a83"
+"checksum hyper 0.12.13 (registry+https://github.com/rust-lang/crates.io-index)" = "95ffee0d1d30de4313fdaaa485891ce924991d45bbc18adfc8ac5b1639e62fbb"
 "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e"
+"checksum indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7e81a7c05f79578dbc15793d8b619db9ba32b4577003ef3af1a91c416798c58d"
 "checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
 "checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450"
 "checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
@@ -1584,6 +1681,7 @@
 "checksum slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d"
 "checksum smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "153ffa32fd170e9944f7e0838edf824a754ec4c1fc64746fcc9fe1f8fa602e5d"
 "checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8"
+"checksum string 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00caf261d6f90f588f8450b8e1230fa0d5be49ee6140fdfbcb55335aff350970"
 "checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550"
 "checksum structopt 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "d77af7242f18c40fd19cb270985930f239ee1646cfb482050bbae9da1d18743b"
 "checksum structopt-derive 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "17ff01fe96de9d16e7372ae5f19dd7ece2c703b51043c3db9ea27f9e393ea311"
@@ -1599,7 +1697,9 @@
 "checksum time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b"
 "checksum timebomb 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f0886f4b637067027d8c9a038a9249d95648689d1a91009d9abb895625f883a"
 "checksum tiny_http 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442681f9f72e440be192700eeb2861e4174b9983f16f4877c93a134cb5e5f63"
+"checksum tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "7392fe0a70d5ce0c882c4778116c519bd5dbaa8a7c3ae3d04578b3afafdcda21"
 "checksum toml 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "4a2ecc31b0351ea18b3fe11274b8db6e4d82bce861bbb22e6dbed40417902c65"
+"checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382"
 "checksum twoway 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1"
 "checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169"
 "checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33"
@@ -1617,6 +1717,7 @@
 "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
 "checksum vte 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4f42f536e22f7fcbb407639765c8fd78707a33109301f834a594758bedd6e8cf"
 "checksum walkdir 2.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "af464bc7be7b785c7ac72e266a6b67c4c9070155606f51655a650a6686204e35"
+"checksum want 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "797464475f30ddb8830cc529aaaae648d581f99e2036a928877dfde027ddf6b3"
 "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
 "checksum xml-rs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c1cb601d29fe2c2ac60a2b2e5e293994d87a1f6fa9687a31a15270f909be9c2"
 "checksum xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "541b12c998c5b56aa2b4e6f18f03664eef9a4fd0a246a55594efae6cc2d964b5"
diff --git a/rustc_deps/Cargo.toml b/rustc_deps/Cargo.toml
index f542b3c..002f18c 100644
--- a/rustc_deps/Cargo.toml
+++ b/rustc_deps/Cargo.toml
@@ -24,6 +24,7 @@
 futures-preview = "=0.3.0-alpha.9"
 getopts = "0.2"
 hex = "0.3.2"
+hyper = { version = "0.12.13", default-features = false }
 itertools = "0.7"
 lazy_static = "1"
 libc = "0.2"
diff --git a/rustc_deps/vendor/either/.cargo-checksum.json b/rustc_deps/vendor/either/.cargo-checksum.json
index ec8372d..3d902c5 100644
--- a/rustc_deps/vendor/either/.cargo-checksum.json
+++ b/rustc_deps/vendor/either/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{".travis.yml":"cb7544c8f5d89b0c57e4d0e53b0676bae662c21a4ee24226ab5a81a4ac8934ce","Cargo.toml":"a4328fa9d5ecf861ff70b57c4e77c444e1ed5b9c1a696132e4e0733058569749","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7576269ea71f767b99297934c0b2367532690f8c4badc695edf8e04ab6a1e545","README-crates.io.md":"b775991a01ab4a0a8de6169f597775319d9ce8178f5c74ccdc634f13a286b20c","README.rst":"f8a01f3853a0ed5c4fdece028cc4822dd32999272f80cc7406de233aa5701f75","src/lib.rs":"9cfcc2230e824dd96d7102bd1f767ca1308ba908b18bf9419ef4c46001546953"},"package":"3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0"}
\ No newline at end of file
+{"files":{".travis.yml":"cb7544c8f5d89b0c57e4d0e53b0676bae662c21a4ee24226ab5a81a4ac8934ce","Cargo.lock":"b3c76c4a473ca7450b5bec0ba80c6e694abbbbe8abaad3fd766b8fd145d004f0","Cargo.toml":"a4328fa9d5ecf861ff70b57c4e77c444e1ed5b9c1a696132e4e0733058569749","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7576269ea71f767b99297934c0b2367532690f8c4badc695edf8e04ab6a1e545","README-crates.io.md":"b775991a01ab4a0a8de6169f597775319d9ce8178f5c74ccdc634f13a286b20c","README.rst":"f8a01f3853a0ed5c4fdece028cc4822dd32999272f80cc7406de233aa5701f75","src/lib.rs":"9cfcc2230e824dd96d7102bd1f767ca1308ba908b18bf9419ef4c46001546953"},"package":"3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/either/Cargo.lock b/rustc_deps/vendor/either/Cargo.lock
new file mode 100644
index 0000000..c9313df
--- /dev/null
+++ b/rustc_deps/vendor/either/Cargo.lock
@@ -0,0 +1,63 @@
+[[package]]
+name = "either"
+version = "1.5.0"
+dependencies = [
+ "serde 1.0.74 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "quote"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.74"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "serde_derive 1.0.74 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.74"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "syn"
+version = "0.14.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[metadata]
+"checksum proc-macro2 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "ee5697238f0d893c7f0ecc59c0999f18d2af85e424de441178bcacc9f9e6cf67"
+"checksum quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "dd636425967c33af890042c483632d33fa7a18f19ad1d7ea72e8998c6ef8dea5"
+"checksum serde 1.0.74 (registry+https://github.com/rust-lang/crates.io-index)" = "f218becd0d51dd24297ef804cb9b2de179dcdc2a3ddf8a73b04b4d595d9e6338"
+"checksum serde_derive 1.0.74 (registry+https://github.com/rust-lang/crates.io-index)" = "47e3375b02728fa6f8c53cb8c1ad3dea7689e12793b6af399ad1e0e202f91c18"
+"checksum syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)" = "261ae9ecaa397c42b960649561949d69311f08eeaea86a65696e6e46517cf741"
+"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
diff --git a/rustc_deps/vendor/fnv/.cargo-checksum.json b/rustc_deps/vendor/fnv/.cargo-checksum.json
new file mode 100644
index 0000000..f8b68ff
--- /dev/null
+++ b/rustc_deps/vendor/fnv/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{".travis.yml":"29b74b95210896ce634c11a9037638668473b5a1b3b1716c505cb04dbb6341fa","Cargo.toml":"8a89e16dc6b373aa151fb2d1221c699b39b1dd5599aa616897fa85511b71104f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"65fdb6c76cd61612070c066eec9ecdb30ee74fb27859d0d9af58b9f499fd0c3e","README.md":"9398b0785fdaf32fe61dca3d6f16e69cf53ab2911c9435053d1ec962cd92b8fa","lib.rs":"0303c8c75e9cf35f5379f67cfc003ba0b51e9643dc8f3bd346322595d7685d97"},"package":"2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/fnv/.travis.yml b/rustc_deps/vendor/fnv/.travis.yml
new file mode 100644
index 0000000..9c58f03
--- /dev/null
+++ b/rustc_deps/vendor/fnv/.travis.yml
@@ -0,0 +1,8 @@
+language: rust
+rust:
+  - nightly
+  - beta
+  - stable
+
+notifications:
+  webhooks: http://build.servo.org:54856/travis
diff --git a/rustc_deps/vendor/fnv/Cargo.toml b/rustc_deps/vendor/fnv/Cargo.toml
new file mode 100644
index 0000000..1157799
--- /dev/null
+++ b/rustc_deps/vendor/fnv/Cargo.toml
@@ -0,0 +1,25 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "fnv"
+version = "1.0.6"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+description = "Fowler–Noll–Vo hash function"
+documentation = "https://doc.servo.org/fnv/"
+readme = "README.md"
+license = "Apache-2.0 / MIT"
+repository = "https://github.com/servo/rust-fnv"
+
+[lib]
+name = "fnv"
+path = "lib.rs"
diff --git a/rustc_deps/vendor/fnv/LICENSE-APACHE b/rustc_deps/vendor/fnv/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rustc_deps/vendor/fnv/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rustc_deps/vendor/fnv/LICENSE-MIT b/rustc_deps/vendor/fnv/LICENSE-MIT
new file mode 100644
index 0000000..bc976a2
--- /dev/null
+++ b/rustc_deps/vendor/fnv/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2017 Contributors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/fnv/README.md b/rustc_deps/vendor/fnv/README.md
new file mode 100644
index 0000000..6a4c4ae
--- /dev/null
+++ b/rustc_deps/vendor/fnv/README.md
@@ -0,0 +1,81 @@
+# rust-fnv
+
+An implementation of the [Fowler–Noll–Vo hash function][chongo].
+
+### [Read the documentation](https://doc.servo.org/fnv/)
+
+
+## About
+
+The FNV hash function is a custom `Hasher` implementation that is more
+efficient for smaller hash keys.
+
+[The Rust FAQ states that][faq] while the default `Hasher` implementation,
+SipHash, is good in many cases, it is notably slower than other algorithms
+with short keys, such as when you have a map of integers to other values.
+In cases like these, [FNV is demonstrably faster][graphs].
+
+Its disadvantages are that it performs badly on larger inputs, and
+provides no protection against collision attacks, where a malicious user
+can craft specific keys designed to slow a hasher down. Thus, it is
+important to profile your program to ensure that you are using small hash
+keys, and be certain that your program could not be exposed to malicious
+inputs (including being a networked server).
+
+The Rust compiler itself uses FNV, as it is not worried about
+denial-of-service attacks, and can assume that its inputs are going to be
+small—a perfect use case for FNV.
+
+
+## Usage
+
+To include this crate in your program, add the following to your `Cargo.toml`:
+
+```toml
+[dependencies]
+fnv = "1.0.3"
+```
+
+
+## Using FNV in a HashMap
+
+The `FnvHashMap` type alias is the easiest way to use the standard library’s
+`HashMap` with FNV.
+
+```rust
+use fnv::FnvHashMap;
+
+let mut map = FnvHashMap::default();
+map.insert(1, "one");
+map.insert(2, "two");
+
+map = FnvHashMap::with_capacity_and_hasher(10, Default::default());
+map.insert(1, "one");
+map.insert(2, "two");
+```
+
+Note, the standard library’s `HashMap::new` and `HashMap::with_capacity`
+are only implemented for the `RandomState` hasher, so using `Default` to
+get the hasher is the next best option.
+
+
+## Using FNV in a HashSet
+
+Similarly, `FnvHashSet` is a type alias for the standard library’s `HashSet`
+with FNV.
+
+```rust
+use fnv::FnvHashSet;
+
+let mut set = FnvHashSet::default();
+set.insert(1);
+set.insert(2);
+
+set = FnvHashSet::with_capacity_and_hasher(10, Default::default());
+set.insert(1);
+set.insert(2);
+```
+
+[chongo]: http://www.isthe.com/chongo/tech/comp/fnv/index.html
+[faq]: https://www.rust-lang.org/en-US/faq.html#why-are-rusts-hashmaps-slow
+[graphs]: http://cglab.ca/~abeinges/blah/hash-rs/
diff --git a/rustc_deps/vendor/fnv/lib.rs b/rustc_deps/vendor/fnv/lib.rs
new file mode 100644
index 0000000..eaf3d44
--- /dev/null
+++ b/rustc_deps/vendor/fnv/lib.rs
@@ -0,0 +1,349 @@
+//! An implementation of the [Fowler–Noll–Vo hash function][chongo].
+//!
+//! ## About
+//!
+//! The FNV hash function is a custom `Hasher` implementation that is more
+//! efficient for smaller hash keys.
+//!
+//! [The Rust FAQ states that][faq] while the default `Hasher` implementation,
+//! SipHash, is good in many cases, it is notably slower than other algorithms
+//! with short keys, such as when you have a map of integers to other values.
+//! In cases like these, [FNV is demonstrably faster][graphs].
+//!
+//! Its disadvantages are that it performs badly on larger inputs, and
+//! provides no protection against collision attacks, where a malicious user
+//! can craft specific keys designed to slow a hasher down. Thus, it is
+//! important to profile your program to ensure that you are using small hash
+//! keys, and be certain that your program could not be exposed to malicious
+//! inputs (including being a networked server).
+//!
+//! The Rust compiler itself uses FNV, as it is not worried about
+//! denial-of-service attacks, and can assume that its inputs are going to be
+//! small—a perfect use case for FNV.
+//!
+//!
+//! ## Using FNV in a `HashMap`
+//!
+//! The `FnvHashMap` type alias is the easiest way to use the standard library’s
+//! `HashMap` with FNV.
+//!
+//! ```rust
+//! use fnv::FnvHashMap;
+//!
+//! let mut map = FnvHashMap::default();
+//! map.insert(1, "one");
+//! map.insert(2, "two");
+//!
+//! map = FnvHashMap::with_capacity_and_hasher(10, Default::default());
+//! map.insert(1, "one");
+//! map.insert(2, "two");
+//! ```
+//!
+//! Note, the standard library’s `HashMap::new` and `HashMap::with_capacity`
+//! are only implemented for the `RandomState` hasher, so using `Default` to
+//! get the hasher is the next best option.
+//!
+//! ## Using FNV in a `HashSet`
+//!
+//! Similarly, `FnvHashSet` is a type alias for the standard library’s `HashSet`
+//! with FNV.
+//!
+//! ```rust
+//! use fnv::FnvHashSet;
+//!
+//! let mut set = FnvHashSet::default();
+//! set.insert(1);
+//! set.insert(2);
+//!
+//! set = FnvHashSet::with_capacity_and_hasher(10, Default::default());
+//! set.insert(1);
+//! set.insert(2);
+//! ```
+//!
+//! [chongo]: http://www.isthe.com/chongo/tech/comp/fnv/index.html
+//! [faq]: https://www.rust-lang.org/en-US/faq.html#why-are-rusts-hashmaps-slow
+//! [graphs]: http://cglab.ca/~abeinges/blah/hash-rs/
+
+
+use std::default::Default;
+use std::hash::{Hasher, BuildHasherDefault};
+use std::collections::{HashMap, HashSet};
+
+/// An implementation of the Fowler–Noll–Vo hash function.
+///
+/// See the [crate documentation](index.html) for more details.
+#[allow(missing_copy_implementations)]
+pub struct FnvHasher(u64);
+
+impl Default for FnvHasher {
+
+    #[inline]
+    fn default() -> FnvHasher {
+        FnvHasher(0xcbf29ce484222325)
+    }
+}
+
+impl FnvHasher {
+    /// Create an FNV hasher starting with a state corresponding
+    /// to the hash `key`.
+    #[inline]
+    pub fn with_key(key: u64) -> FnvHasher {
+        FnvHasher(key)
+    }
+}
+
+impl Hasher for FnvHasher {
+    #[inline]
+    fn finish(&self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    fn write(&mut self, bytes: &[u8]) {
+        let FnvHasher(mut hash) = *self;
+
+        for byte in bytes.iter() {
+            hash = hash ^ (*byte as u64);
+            hash = hash.wrapping_mul(0x100000001b3);
+        }
+
+        *self = FnvHasher(hash);
+    }
+}
+
+/// A builder for default FNV hashers.
+pub type FnvBuildHasher = BuildHasherDefault<FnvHasher>;
+
+/// A `HashMap` using a default FNV hasher.
+pub type FnvHashMap<K, V> = HashMap<K, V, FnvBuildHasher>;
+
+/// A `HashSet` using a default FNV hasher.
+pub type FnvHashSet<T> = HashSet<T, FnvBuildHasher>;
+
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use std::hash::Hasher;
+
+    fn fnv1a(bytes: &[u8]) -> u64 {
+        let mut hasher = FnvHasher::default();
+        hasher.write(bytes);
+        hasher.finish()
+    }
+
+    fn repeat_10(bytes: &[u8]) -> Vec<u8> {
+        (0..10).flat_map(|_| bytes.iter().cloned()).collect()
+    }
+
+    fn repeat_500(bytes: &[u8]) -> Vec<u8> {
+        (0..500).flat_map(|_| bytes.iter().cloned()).collect()
+    }
+
+    #[test]
+    fn basic_tests() {
+        assert_eq!(fnv1a(b""), 0xcbf29ce484222325);
+        assert_eq!(fnv1a(b"a"), 0xaf63dc4c8601ec8c);
+        assert_eq!(fnv1a(b"b"), 0xaf63df4c8601f1a5);
+        assert_eq!(fnv1a(b"c"), 0xaf63de4c8601eff2);
+        assert_eq!(fnv1a(b"d"), 0xaf63d94c8601e773);
+        assert_eq!(fnv1a(b"e"), 0xaf63d84c8601e5c0);
+        assert_eq!(fnv1a(b"f"), 0xaf63db4c8601ead9);
+        assert_eq!(fnv1a(b"fo"), 0x08985907b541d342);
+        assert_eq!(fnv1a(b"foo"), 0xdcb27518fed9d577);
+        assert_eq!(fnv1a(b"foob"), 0xdd120e790c2512af);
+        assert_eq!(fnv1a(b"fooba"), 0xcac165afa2fef40a);
+        assert_eq!(fnv1a(b"foobar"), 0x85944171f73967e8);
+        assert_eq!(fnv1a(b"\0"), 0xaf63bd4c8601b7df);
+        assert_eq!(fnv1a(b"a\0"), 0x089be207b544f1e4);
+        assert_eq!(fnv1a(b"b\0"), 0x08a61407b54d9b5f);
+        assert_eq!(fnv1a(b"c\0"), 0x08a2ae07b54ab836);
+        assert_eq!(fnv1a(b"d\0"), 0x0891b007b53c4869);
+        assert_eq!(fnv1a(b"e\0"), 0x088e4a07b5396540);
+        assert_eq!(fnv1a(b"f\0"), 0x08987c07b5420ebb);
+        assert_eq!(fnv1a(b"fo\0"), 0xdcb28a18fed9f926);
+        assert_eq!(fnv1a(b"foo\0"), 0xdd1270790c25b935);
+        assert_eq!(fnv1a(b"foob\0"), 0xcac146afa2febf5d);
+        assert_eq!(fnv1a(b"fooba\0"), 0x8593d371f738acfe);
+        assert_eq!(fnv1a(b"foobar\0"), 0x34531ca7168b8f38);
+        assert_eq!(fnv1a(b"ch"), 0x08a25607b54a22ae);
+        assert_eq!(fnv1a(b"cho"), 0xf5faf0190cf90df3);
+        assert_eq!(fnv1a(b"chon"), 0xf27397910b3221c7);
+        assert_eq!(fnv1a(b"chong"), 0x2c8c2b76062f22e0);
+        assert_eq!(fnv1a(b"chongo"), 0xe150688c8217b8fd);
+        assert_eq!(fnv1a(b"chongo "), 0xf35a83c10e4f1f87);
+        assert_eq!(fnv1a(b"chongo w"), 0xd1edd10b507344d0);
+        assert_eq!(fnv1a(b"chongo wa"), 0x2a5ee739b3ddb8c3);
+        assert_eq!(fnv1a(b"chongo was"), 0xdcfb970ca1c0d310);
+        assert_eq!(fnv1a(b"chongo was "), 0x4054da76daa6da90);
+        assert_eq!(fnv1a(b"chongo was h"), 0xf70a2ff589861368);
+        assert_eq!(fnv1a(b"chongo was he"), 0x4c628b38aed25f17);
+        assert_eq!(fnv1a(b"chongo was her"), 0x9dd1f6510f78189f);
+        assert_eq!(fnv1a(b"chongo was here"), 0xa3de85bd491270ce);
+        assert_eq!(fnv1a(b"chongo was here!"), 0x858e2fa32a55e61d);
+        assert_eq!(fnv1a(b"chongo was here!\n"), 0x46810940eff5f915);
+        assert_eq!(fnv1a(b"ch\0"), 0xf5fadd190cf8edaa);
+        assert_eq!(fnv1a(b"cho\0"), 0xf273ed910b32b3e9);
+        assert_eq!(fnv1a(b"chon\0"), 0x2c8c5276062f6525);
+        assert_eq!(fnv1a(b"chong\0"), 0xe150b98c821842a0);
+        assert_eq!(fnv1a(b"chongo\0"), 0xf35aa3c10e4f55e7);
+        assert_eq!(fnv1a(b"chongo \0"), 0xd1ed680b50729265);
+        assert_eq!(fnv1a(b"chongo w\0"), 0x2a5f0639b3dded70);
+        assert_eq!(fnv1a(b"chongo wa\0"), 0xdcfbaa0ca1c0f359);
+        assert_eq!(fnv1a(b"chongo was\0"), 0x4054ba76daa6a430);
+        assert_eq!(fnv1a(b"chongo was \0"), 0xf709c7f5898562b0);
+        assert_eq!(fnv1a(b"chongo was h\0"), 0x4c62e638aed2f9b8);
+        assert_eq!(fnv1a(b"chongo was he\0"), 0x9dd1a8510f779415);
+        assert_eq!(fnv1a(b"chongo was her\0"), 0xa3de2abd4911d62d);
+        assert_eq!(fnv1a(b"chongo was here\0"), 0x858e0ea32a55ae0a);
+        assert_eq!(fnv1a(b"chongo was here!\0"), 0x46810f40eff60347);
+        assert_eq!(fnv1a(b"chongo was here!\n\0"), 0xc33bce57bef63eaf);
+        assert_eq!(fnv1a(b"cu"), 0x08a24307b54a0265);
+        assert_eq!(fnv1a(b"cur"), 0xf5b9fd190cc18d15);
+        assert_eq!(fnv1a(b"curd"), 0x4c968290ace35703);
+        assert_eq!(fnv1a(b"curds"), 0x07174bd5c64d9350);
+        assert_eq!(fnv1a(b"curds "), 0x5a294c3ff5d18750);
+        assert_eq!(fnv1a(b"curds a"), 0x05b3c1aeb308b843);
+        assert_eq!(fnv1a(b"curds an"), 0xb92a48da37d0f477);
+        assert_eq!(fnv1a(b"curds and"), 0x73cdddccd80ebc49);
+        assert_eq!(fnv1a(b"curds and "), 0xd58c4c13210a266b);
+        assert_eq!(fnv1a(b"curds and w"), 0xe78b6081243ec194);
+        assert_eq!(fnv1a(b"curds and wh"), 0xb096f77096a39f34);
+        assert_eq!(fnv1a(b"curds and whe"), 0xb425c54ff807b6a3);
+        assert_eq!(fnv1a(b"curds and whey"), 0x23e520e2751bb46e);
+        assert_eq!(fnv1a(b"curds and whey\n"), 0x1a0b44ccfe1385ec);
+        assert_eq!(fnv1a(b"cu\0"), 0xf5ba4b190cc2119f);
+        assert_eq!(fnv1a(b"cur\0"), 0x4c962690ace2baaf);
+        assert_eq!(fnv1a(b"curd\0"), 0x0716ded5c64cda19);
+        assert_eq!(fnv1a(b"curds\0"), 0x5a292c3ff5d150f0);
+        assert_eq!(fnv1a(b"curds \0"), 0x05b3e0aeb308ecf0);
+        assert_eq!(fnv1a(b"curds a\0"), 0xb92a5eda37d119d9);
+        assert_eq!(fnv1a(b"curds an\0"), 0x73ce41ccd80f6635);
+        assert_eq!(fnv1a(b"curds and\0"), 0xd58c2c132109f00b);
+        assert_eq!(fnv1a(b"curds and \0"), 0xe78baf81243f47d1);
+        assert_eq!(fnv1a(b"curds and w\0"), 0xb0968f7096a2ee7c);
+        assert_eq!(fnv1a(b"curds and wh\0"), 0xb425a84ff807855c);
+        assert_eq!(fnv1a(b"curds and whe\0"), 0x23e4e9e2751b56f9);
+        assert_eq!(fnv1a(b"curds and whey\0"), 0x1a0b4eccfe1396ea);
+        assert_eq!(fnv1a(b"curds and whey\n\0"), 0x54abd453bb2c9004);
+        assert_eq!(fnv1a(b"hi"), 0x08ba5f07b55ec3da);
+        assert_eq!(fnv1a(b"hi\0"), 0x337354193006cb6e);
+        assert_eq!(fnv1a(b"hello"), 0xa430d84680aabd0b);
+        assert_eq!(fnv1a(b"hello\0"), 0xa9bc8acca21f39b1);
+        assert_eq!(fnv1a(b"\xff\x00\x00\x01"), 0x6961196491cc682d);
+        assert_eq!(fnv1a(b"\x01\x00\x00\xff"), 0xad2bb1774799dfe9);
+        assert_eq!(fnv1a(b"\xff\x00\x00\x02"), 0x6961166491cc6314);
+        assert_eq!(fnv1a(b"\x02\x00\x00\xff"), 0x8d1bb3904a3b1236);
+        assert_eq!(fnv1a(b"\xff\x00\x00\x03"), 0x6961176491cc64c7);
+        assert_eq!(fnv1a(b"\x03\x00\x00\xff"), 0xed205d87f40434c7);
+        assert_eq!(fnv1a(b"\xff\x00\x00\x04"), 0x6961146491cc5fae);
+        assert_eq!(fnv1a(b"\x04\x00\x00\xff"), 0xcd3baf5e44f8ad9c);
+        assert_eq!(fnv1a(b"\x40\x51\x4e\x44"), 0xe3b36596127cd6d8);
+        assert_eq!(fnv1a(b"\x44\x4e\x51\x40"), 0xf77f1072c8e8a646);
+        assert_eq!(fnv1a(b"\x40\x51\x4e\x4a"), 0xe3b36396127cd372);
+        assert_eq!(fnv1a(b"\x4a\x4e\x51\x40"), 0x6067dce9932ad458);
+        assert_eq!(fnv1a(b"\x40\x51\x4e\x54"), 0xe3b37596127cf208);
+        assert_eq!(fnv1a(b"\x54\x4e\x51\x40"), 0x4b7b10fa9fe83936);
+        assert_eq!(fnv1a(b"127.0.0.1"), 0xaabafe7104d914be);
+        assert_eq!(fnv1a(b"127.0.0.1\0"), 0xf4d3180b3cde3eda);
+        assert_eq!(fnv1a(b"127.0.0.2"), 0xaabafd7104d9130b);
+        assert_eq!(fnv1a(b"127.0.0.2\0"), 0xf4cfb20b3cdb5bb1);
+        assert_eq!(fnv1a(b"127.0.0.3"), 0xaabafc7104d91158);
+        assert_eq!(fnv1a(b"127.0.0.3\0"), 0xf4cc4c0b3cd87888);
+        assert_eq!(fnv1a(b"64.81.78.68"), 0xe729bac5d2a8d3a7);
+        assert_eq!(fnv1a(b"64.81.78.68\0"), 0x74bc0524f4dfa4c5);
+        assert_eq!(fnv1a(b"64.81.78.74"), 0xe72630c5d2a5b352);
+        assert_eq!(fnv1a(b"64.81.78.74\0"), 0x6b983224ef8fb456);
+        assert_eq!(fnv1a(b"64.81.78.84"), 0xe73042c5d2ae266d);
+        assert_eq!(fnv1a(b"64.81.78.84\0"), 0x8527e324fdeb4b37);
+        assert_eq!(fnv1a(b"feedface"), 0x0a83c86fee952abc);
+        assert_eq!(fnv1a(b"feedface\0"), 0x7318523267779d74);
+        assert_eq!(fnv1a(b"feedfacedaffdeed"), 0x3e66d3d56b8caca1);
+        assert_eq!(fnv1a(b"feedfacedaffdeed\0"), 0x956694a5c0095593);
+        assert_eq!(fnv1a(b"feedfacedeadbeef"), 0xcac54572bb1a6fc8);
+        assert_eq!(fnv1a(b"feedfacedeadbeef\0"), 0xa7a4c9f3edebf0d8);
+        assert_eq!(fnv1a(b"line 1\nline 2\nline 3"), 0x7829851fac17b143);
+        assert_eq!(fnv1a(b"chongo <Landon Curt Noll> /\\../\\"), 0x2c8f4c9af81bcf06);
+        assert_eq!(fnv1a(b"chongo <Landon Curt Noll> /\\../\\\0"), 0xd34e31539740c732);
+        assert_eq!(fnv1a(b"chongo (Landon Curt Noll) /\\../\\"), 0x3605a2ac253d2db1);
+        assert_eq!(fnv1a(b"chongo (Landon Curt Noll) /\\../\\\0"), 0x08c11b8346f4a3c3);
+        assert_eq!(fnv1a(b"http://antwrp.gsfc.nasa.gov/apod/astropix.html"), 0x6be396289ce8a6da);
+        assert_eq!(fnv1a(b"http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash"), 0xd9b957fb7fe794c5);
+        assert_eq!(fnv1a(b"http://epod.usra.edu/"), 0x05be33da04560a93);
+        assert_eq!(fnv1a(b"http://exoplanet.eu/"), 0x0957f1577ba9747c);
+        assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/cam3/"), 0xda2cc3acc24fba57);
+        assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/cams/HMcam/"), 0x74136f185b29e7f0);
+        assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/deformation.html"), 0xb2f2b4590edb93b2);
+        assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/images.html"), 0xb3608fce8b86ae04);
+        assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/maps.html"), 0x4a3a865079359063);
+        assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/volcanowatch/current_issue.html"), 0x5b3a7ef496880a50);
+        assert_eq!(fnv1a(b"http://neo.jpl.nasa.gov/risk/"), 0x48fae3163854c23b);
+        assert_eq!(fnv1a(b"http://norvig.com/21-days.html"), 0x07aaa640476e0b9a);
+        assert_eq!(fnv1a(b"http://primes.utm.edu/curios/home.php"), 0x2f653656383a687d);
+        assert_eq!(fnv1a(b"http://slashdot.org/"), 0xa1031f8e7599d79c);
+        assert_eq!(fnv1a(b"http://tux.wr.usgs.gov/Maps/155.25-19.5.html"), 0xa31908178ff92477);
+        assert_eq!(fnv1a(b"http://volcano.wr.usgs.gov/kilaueastatus.php"), 0x097edf3c14c3fb83);
+        assert_eq!(fnv1a(b"http://www.avo.alaska.edu/activity/Redoubt.php"), 0xb51ca83feaa0971b);
+        assert_eq!(fnv1a(b"http://www.dilbert.com/fast/"), 0xdd3c0d96d784f2e9);
+        assert_eq!(fnv1a(b"http://www.fourmilab.ch/gravitation/orbits/"), 0x86cd26a9ea767d78);
+        assert_eq!(fnv1a(b"http://www.fpoa.net/"), 0xe6b215ff54a30c18);
+        assert_eq!(fnv1a(b"http://www.ioccc.org/index.html"), 0xec5b06a1c5531093);
+        assert_eq!(fnv1a(b"http://www.isthe.com/cgi-bin/number.cgi"), 0x45665a929f9ec5e5);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/bio.html"), 0x8c7609b4a9f10907);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/index.html"), 0x89aac3a491f0d729);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/src/calc/lucas-calc"), 0x32ce6b26e0f4a403);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/astro/venus2004.html"), 0x614ab44e02b53e01);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/astro/vita.html"), 0xfa6472eb6eef3290);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/c/expert.html"), 0x9e5d75eb1948eb6a);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/calc/index.html"), 0xb6d12ad4a8671852);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/fnv/index.html"), 0x88826f56eba07af1);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/number/howhigh.html"), 0x44535bf2645bc0fd);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/number/number.html"), 0x169388ffc21e3728);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/prime/mersenne.html"), 0xf68aac9e396d8224);
+        assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/prime/mersenne.html#largest"), 0x8e87d7e7472b3883);
+        assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/corpspeak.cgi"), 0x295c26caa8b423de);
+        assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/haiku.cgi"), 0x322c814292e72176);
+        assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/rand-none.cgi"), 0x8a06550eb8af7268);
+        assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/randdist.cgi"), 0xef86d60e661bcf71);
+        assert_eq!(fnv1a(b"http://www.lavarnd.org/index.html"), 0x9e5426c87f30ee54);
+        assert_eq!(fnv1a(b"http://www.lavarnd.org/what/nist-test.html"), 0xf1ea8aa826fd047e);
+        assert_eq!(fnv1a(b"http://www.macosxhints.com/"), 0x0babaf9a642cb769);
+        assert_eq!(fnv1a(b"http://www.mellis.com/"), 0x4b3341d4068d012e);
+        assert_eq!(fnv1a(b"http://www.nature.nps.gov/air/webcams/parks/havoso2alert/havoalert.cfm"), 0xd15605cbc30a335c);
+        assert_eq!(fnv1a(b"http://www.nature.nps.gov/air/webcams/parks/havoso2alert/timelines_24.cfm"), 0x5b21060aed8412e5);
+        assert_eq!(fnv1a(b"http://www.paulnoll.com/"), 0x45e2cda1ce6f4227);
+        assert_eq!(fnv1a(b"http://www.pepysdiary.com/"), 0x50ae3745033ad7d4);
+        assert_eq!(fnv1a(b"http://www.sciencenews.org/index/home/activity/view"), 0xaa4588ced46bf414);
+        assert_eq!(fnv1a(b"http://www.skyandtelescope.com/"), 0xc1b0056c4a95467e);
+        assert_eq!(fnv1a(b"http://www.sput.nl/~rob/sirius.html"), 0x56576a71de8b4089);
+        assert_eq!(fnv1a(b"http://www.systemexperts.com/"), 0xbf20965fa6dc927e);
+        assert_eq!(fnv1a(b"http://www.tq-international.com/phpBB3/index.php"), 0x569f8383c2040882);
+        assert_eq!(fnv1a(b"http://www.travelquesttours.com/index.htm"), 0xe1e772fba08feca0);
+        assert_eq!(fnv1a(b"http://www.wunderground.com/global/stations/89606.html"), 0x4ced94af97138ac4);
+        assert_eq!(fnv1a(&repeat_10(b"21701")), 0xc4112ffb337a82fb);
+        assert_eq!(fnv1a(&repeat_10(b"M21701")), 0xd64a4fd41de38b7d);
+        assert_eq!(fnv1a(&repeat_10(b"2^21701-1")), 0x4cfc32329edebcbb);
+        assert_eq!(fnv1a(&repeat_10(b"\x54\xc5")), 0x0803564445050395);
+        assert_eq!(fnv1a(&repeat_10(b"\xc5\x54")), 0xaa1574ecf4642ffd);
+        assert_eq!(fnv1a(&repeat_10(b"23209")), 0x694bc4e54cc315f9);
+        assert_eq!(fnv1a(&repeat_10(b"M23209")), 0xa3d7cb273b011721);
+        assert_eq!(fnv1a(&repeat_10(b"2^23209-1")), 0x577c2f8b6115bfa5);
+        assert_eq!(fnv1a(&repeat_10(b"\x5a\xa9")), 0xb7ec8c1a769fb4c1);
+        assert_eq!(fnv1a(&repeat_10(b"\xa9\x5a")), 0x5d5cfce63359ab19);
+        assert_eq!(fnv1a(&repeat_10(b"391581216093")), 0x33b96c3cd65b5f71);
+        assert_eq!(fnv1a(&repeat_10(b"391581*2^216093-1")), 0xd845097780602bb9);
+        assert_eq!(fnv1a(&repeat_10(b"\x05\xf9\x9d\x03\x4c\x81")), 0x84d47645d02da3d5);
+        assert_eq!(fnv1a(&repeat_10(b"FEDCBA9876543210")), 0x83544f33b58773a5);
+        assert_eq!(fnv1a(&repeat_10(b"\xfe\xdc\xba\x98\x76\x54\x32\x10")), 0x9175cbb2160836c5);
+        assert_eq!(fnv1a(&repeat_10(b"EFCDAB8967452301")), 0xc71b3bc175e72bc5);
+        assert_eq!(fnv1a(&repeat_10(b"\xef\xcd\xab\x89\x67\x45\x23\x01")), 0x636806ac222ec985);
+        assert_eq!(fnv1a(&repeat_10(b"0123456789ABCDEF")), 0xb6ef0e6950f52ed5);
+        assert_eq!(fnv1a(&repeat_10(b"\x01\x23\x45\x67\x89\xab\xcd\xef")), 0xead3d8a0f3dfdaa5);
+        assert_eq!(fnv1a(&repeat_10(b"1032547698BADCFE")), 0x922908fe9a861ba5);
+        assert_eq!(fnv1a(&repeat_10(b"\x10\x32\x54\x76\x98\xba\xdc\xfe")), 0x6d4821de275fd5c5);
+        assert_eq!(fnv1a(&repeat_500(b"\x00")), 0x1fe3fce62bd816b5);
+        assert_eq!(fnv1a(&repeat_500(b"\x07")), 0xc23e9fccd6f70591);
+        assert_eq!(fnv1a(&repeat_500(b"~")), 0xc1af12bdfe16b5b5);
+        assert_eq!(fnv1a(&repeat_500(b"\x7f")), 0x39e9f18f2f85e221);
+    }
+}
diff --git a/rustc_deps/vendor/futures/.cargo-checksum.json b/rustc_deps/vendor/futures/.cargo-checksum.json
new file mode 100644
index 0000000..6c92479
--- /dev/null
+++ b/rustc_deps/vendor/futures/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{".travis.yml":"78be1e09bb9fa7465e4c6ab149cf4cca8eca89c584afa7d8a385e1785c9096f2","CHANGELOG.md":"081044d6883e82c3c5a288e0cf0e839acfffbc329c6170cecbf436d163b3390c","Cargo.toml":"a16be26e16b8bd9838eed9a89adb6f3e3934e428ba4b0463714b3f3b7326a75a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"52248a83513c979d501093d0621ff76a291e169fcdbc2ec2d6a25672fd86fe71","appveyor.yml":"7b8de5d694cb575649354d7fc3eff0781e2c5c412df4bc8a90b36b6fdb55bfab","benches/bilock.rs":"60b9e0814b8396e0320d299273c6f91c2ccc09a2bb59eec92df74a1f0919e54f","benches/futures_unordered.rs":"fa2d3b5e6cdfe1e941d78c119a696fb583341fa0a0895ec2692e6d374ceb9a0e","benches/poll.rs":"ca369079c4db366a180be22f406eaf8e94e2e771c02568eb35d89e63093006cf","benches/sync_mpsc.rs":"8d4dbf78afcdf61fc72da326c4810bc797462771707d079f95a7f75aa2ec0ec0","benches/thread_notify.rs":"1992b1e2b352fbc15a611d1318ac1bf6f19318d769086d55c80e6863f1b0e106","src/executor.rs":"80466c075daf030e07cc0d053618837cb73c07f5399b3d65016925f4488adb73","src/future/and_then.rs":"15653d392d331a1fc4619129f737acc28525c88d1675b7fcea6ed27c5b1bf302","src/future/catch_unwind.rs":"dfef6b6a66c09574338046cf23b0c6aacd8200872d512b831d6dc12038f05298","src/future/chain.rs":"4d712e989e079f4164d5d9fe3bb522d521094b0d8083ee639350570444e5bb93","src/future/either.rs":"d8d3a4686dfe0068cc35ee452268ff2406e1e6adfddd3f0841988bfa6489ca5d","src/future/empty.rs":"b549a1ca0f21bc6d1a26d9063a9a60deb9235ff7eff5db915050115fed91a9c7","src/future/flatten.rs":"7eb15429fcc749326371fe571e1f7d294d7b83f7557e6e1971e2206180253d65","src/future/flatten_stream.rs":"cf914425c3606b61c046df5c43d64266d6f2328693e4122441f9bbcf7cb0a4e1","src/future/from_err.rs":"a1f42d95f7b52e80c2e5a03b44cbce0efbe5fc486dfe33d799b74ab9ba9057ab","src/future/fuse.rs":"3920c819b850c8f04b3868eae70dc0d3e6802ff0b517501f3aa5057a3b632102","src/future/inspect.rs":"89c362d8402dddd784bcc54e62ca27657ca8108e1ae8de5a7237e08650e10636","src/future/into_stream.rs":"0fa6bc4d70e8b4d75cf45fba53b39f033b87574103fffea4090b78f049bf43d0","src/future/join.rs":"b1dcefb03b1cb4e609ad2e79ba9a6cfab24235d7a4fff7fb9daf2c8fbf0f3d70","src/future/join_all.rs":"30fc27cbc1248046937b441a165a911e9ed1cd887ad6f3aeeb573b59c43e9cbf","src/future/lazy.rs":"1a2025bae3675fb682cefbf8a88bbb7a7519cfdee42dd6b3049a4d2b7ab8b5b1","src/future/loop_fn.rs":"5bd952247ae4e9d31dff77386bbd3700f596da136ea53e9e9944266af3f08688","src/future/map.rs":"91e148d9adaea929b85ede63c71fb07ef9b5611db906a13eedad2cf551745b47","src/future/map_err.rs":"2c8e87fa8ff56061722db6c69aaba588e6df6835a4e2fe84826f0bd4fed2e007","src/future/mod.rs":"362679ce9e4d55952eced756b8b69c017b966d2027a2c729ce20c382f1f86109","src/future/option.rs":"93270226cadcfa349250023e2070e687cf595831f427904ca744f7bc50342ded","src/future/or_else.rs":"444567101c4c437b184aa2e2eec0cf4363af442c0afc58d6508d3d2ac86489a9","src/future/poll_fn.rs":"817bfb75e7c43ca96a53e8cc9f48606c92c3c6742b07a732ce79a8f9b7bf8808","src/future/result.rs":"cc62c2377defb7b53aa859bf05c41c52a9cf8583378b7072bb2b45232d5fc9c5","src/future/select.rs":"73efd98004d5d8c46607bf770ff07a810bcdbe05cce0e8e4f41f5e659fd44203","src/future/select2.rs":"cfbbf3a9794109c56a3703456fae6111826bc25f98f2f36b234d483eeeeab482","src/future/select_all.rs":"b009e57ac241a3aba78db0bb751432cb99c1e91b8bae1b3baf225921f0daa441","src/future/select_ok.rs":"4884896914d8903edbfa12b5e255d35d5b2c91a9182ce6f774978db636617905","src/future/shared.rs":"1c406e4fbdd364a90d3ce57f207efd78491e9618ceeb86f
3c0f3820c30a1ea21","src/future/then.rs":"c49b388ab3c78979ad9ae40f6e859ee98e9351bdb11e3c3f1ad4ceca77651a56","src/lib.rs":"f9f0855d61bb06f9b1df0bf56220a41c5ab5c721a72ef24cf46e2dccb5153c7a","src/lock.rs":"fe4c8185f9774a134d4ce27af4a9c8b25f30f7dcc6990473210d66b6b8936ce4","src/poll.rs":"df74c3a8169d7895f3c46dd6de99edd77bd024b85e26b1d0644d2b8e5ef515b9","src/resultstream.rs":"365bc127c0410badb58ea2beb2abae546968ba3ac91abe2140e93e0c3620228f","src/sink/buffer.rs":"17e6bad2434f31630494a9a98e40a287da8a603515885ab8a17199ab0e5f8e46","src/sink/fanout.rs":"1fbcabdb1d22a43919417790082dc27ac65e2a100263504b6664a0b5e0657ae1","src/sink/flush.rs":"6c9a3bb9705c740e601ca6101cf6e6a87f2568661cff39a3576ef55986e3cb60","src/sink/from_err.rs":"b6d6e43c1f90c70bc1576ac2c9f1a7777fc07eef419721850962d896ac6cc3de","src/sink/map_err.rs":"b34a60880336b536666c1047f1919dd90eeed10b869e9c679fa928a3d5321112","src/sink/mod.rs":"4b4d80d008bfa8d0abc83cd640dc9c107423c7920795678c079c544c037ab632","src/sink/send.rs":"019f3f8ab450edc0adb864e4b819f5b0d4cfe9dc33a53093c2aa18e1eb6270dc","src/sink/send_all.rs":"b05047459faceecf0dfd5e6280014c31f5a2a1058974785db8ede497c10a1e79","src/sink/wait.rs":"9c70fdd54c642e4ecf7d9b0ff1fbb2df9c89349dfd60b5482748cd93c6dc301e","src/sink/with.rs":"a122cc26108cb3396db12cb2107c576d366c61191f656acedd5ff6c65165fcfc","src/sink/with_flat_map.rs":"7b0f367d98a99d297c3ce097e9858ad7b0dfdafbb66516cba0767b62beb01af3","src/stream/and_then.rs":"9f0f6ee06343ab03eebcb71257963e76d8e7208e4015b402cc8a58f793e37d79","src/stream/buffer_unordered.rs":"057c3dec32baf451ef02f44ef849086637e4d2cbb2d65907cc15ed9398fe131b","src/stream/buffered.rs":"4ced19e37e47182d5f9c7f852a7906c35b71ac4a5b2774a9101859defbecb190","src/stream/catch_unwind.rs":"957b935645f1744a4741962772c15e94370153f33e0db356309bf98ebb599c37","src/stream/chain.rs":"0b6b06cf5aaf0c2f665c61c65766d6113e24f690ebd9ad3a89abfa521e2ce9b2","src/stream/channel.rs":"f728402228fea0be01ec5cf1d02e49e52666c0c9ea986708d18e24f30376f6de","src/stream/chunks.rs":"6c68b006670f2ea227231ba9a7986c46b4f798a871a3de62dd00acfb84c3435b","src/stream/collect.rs":"e770850c7ed2d458b521c12af4ee76adf2303919849d2f95fa93fdf574c86d37","src/stream/concat.rs":"39549687b589562ce713a999e2887b6f20ec8f87291d82ee8b1a48dd7dfe9c8e","src/stream/empty.rs":"e8e2820fd3b2329a6987a11c3b3f28849f49427d1a745f2bdc7a4982476514e7","src/stream/filter.rs":"4abaf6c7bd3ecbccf7deac7920cc6bdc1b17875bedf7c6acd7e702254b3b83ba","src/stream/filter_map.rs":"573079f98efc38bbc68746084702b952ccb035bd8238c3c30fa103979865ed0e","src/stream/flatten.rs":"f2edce326745373c9c524bb574ce18584be95c7fd1a0ef875256b39891219b18","src/stream/fold.rs":"7f397373ed66560ff1eb0cffc5dafaf1569d3c8155fe418cc2bf6fc33faec230","src/stream/for_each.rs":"bd7f96bf551a829e37a54fd529e0b68a8868480797df039c75e1f226639cf096","src/stream/forward.rs":"5dd07a3d85130554f6c0c950fd635e4594f43a0284440f6f1af2a240511c5621","src/stream/from_err.rs":"bde1791790030c480aa88c6f7b235703d5b400249c841c8b045ea2203728b96c","src/stream/fuse.rs":"5d544151de7e5a3ce8a47bdeabe5cc9beaf0937b1eeed67e8d76842f54dea65d","src/stream/future.rs":"8f72146483c0423cbc11d45c76ee219ed12d940164c83199bb85cd6d5d64c22d","src/stream/futures_ordered.rs":"3e41623352600e116c327fe37005da04b0dcf1d5db379cab147738a1383732d8","src/stream/futures_unordered.rs":"3a445ebf5815ecbafaef6dab011cc3edf012564082717a615b70425e78142e1e","src/stream/inspect.rs":"4a1e7d7bbb0842a7021c5145bb1b64dbc213cfdccff51fe8399e3120c123eab5","src/stream/inspect_err.rs":"b4f2bc6a139df8f8eb403aafbca91c05b3093d3a6e13cef034a639fbe3ebe01e","src/stream/iter.rs":"cff
f6b28759ccf390e8367f9f63209133c16e7fa53c7ae71167f318ba3ec624b","src/stream/iter_ok.rs":"5165cb02972776515734e0f343e626fbb448b65b38cdeacffbd86116f3c3cd37","src/stream/iter_result.rs":"9db38b1066d9adc1ece496432127049d36fb4b9895660c2af2b7ac28510c9084","src/stream/map.rs":"ba16b1469e519377939cf3bd073b258ac41e6349aab1c59393e3b30178a56496","src/stream/map_err.rs":"5ce9a279fde1f4f0887435856e1efa4fdeda749d43f4bab658b0abd216bc0a6f","src/stream/merge.rs":"63bb60ca386e280985cee8e16ae8b07f02d57aa8a0fa877ae01fb8b4678366d0","src/stream/mod.rs":"4017d01e3d1ae009d1ea5da53499ad8fa0b6e90d5a7693fab9acc239712445ef","src/stream/once.rs":"277c960dc4bfa09fcc6112efa4e38a9fe937dc31fff440405e60bfd843f3c1ab","src/stream/or_else.rs":"c11ea499d85d6204ad083058eeca9dbf29873c49ee21bf01f9fe53e9ec3bba52","src/stream/peek.rs":"25d78baa0b3e30d2d1c72d1f3b1aa2a28811522d345dceefec587beb18b70fe2","src/stream/poll_fn.rs":"1dffbe60bd50c19efb71de2f768eecf70fa280b0d9c9cb889d16bb43b1619c8b","src/stream/repeat.rs":"807f2be5c9c1e7d54954f73ee38a373e71177aca43be8866712798f29ab541c2","src/stream/select.rs":"027873d9142e896272f7471cccaaccb133bf9f696a3f7510f3fb1aa4253a7c09","src/stream/skip.rs":"d7c839ca15f830709ebedd9526bb9ebd64ee22cb944e44213ce850a1383b71fa","src/stream/skip_while.rs":"aeb9bd64530bfaa631f4ca9500861c62fbf32849b09383eb26904bedd8b8b269","src/stream/split.rs":"c9b391fcbf3d1762bde442fd3549bd4739d2f9f486e88063650d42fea33c6af3","src/stream/take.rs":"9872429dd89cb34755b514abde9b6a876da076aea0449fcadfcc48e982507f21","src/stream/take_while.rs":"36bc2a33850ba2b58fb0da3866c96c8f4dfbd81133e615fda031518e71d425b5","src/stream/then.rs":"c7c66e27180cf2d98694de27504283a32444a0d0d6919ab25b3621fa6169408d","src/stream/unfold.rs":"5e69718714cc38c5ca6d0a6f5243ab28e392bdc97d96e8ab9059d9f0e772120c","src/stream/wait.rs":"936a15df4499d188f210cb0133bc8ad25e33e5b674a96105b4da549f32e92b40","src/stream/zip.rs":"33f1401683a29ce194927533c40bdbbc0783c552cf0b666f268fa7109e593853","src/sync/bilock.rs":"def09b26f9d66f2be0a8885ad6cf7106c3a073493bad591fc4a068212f0d739f","src/sync/mod.rs":"27ad26777f600f7054215fccdff07f4303182af2a6e0998d4229d62b090b7aac","src/sync/mpsc/mod.rs":"97542ef9fcbe338f2ac0ce982a9af11883aded33d3b4ce34a788cf98e00a7d3f","src/sync/mpsc/queue.rs":"b39889f1b2000a3de995a50f46243f97a98d3cce7c6de4b95c4d8ffeb42af918","src/sync/oneshot.rs":"5d41f1d19b78ada7d5587d0fb5751de5886281cf59889ba1b77cbde399975f1f","src/task.rs":"914955224ba1613835027e6d6436b83ce41caf217428c2c576e8783cacc7ba96","src/task_impl/atomic_task.rs":"1a1cf99a0220116f2f28742acd82b17b8f46e4ec737238ac0609d5581cac3084","src/task_impl/core.rs":"3ababa3970da5668f2b678724a4b5e1aa5f2b65a2355276b7d14ba3dfdd52686","src/task_impl/mod.rs":"2799e3997ca43d00ee6c32f79882e5a07b5610b86338e2388d974f2d4c69d52b","src/task_impl/std/data.rs":"9b6210811c095c4d0ec0f59a566bb8f5bc4b6ba544c72a4565dc47f3b7fbfab9","src/task_impl/std/mod.rs":"e66075507ee5b8850a5084507d8856ea820be1a063300155dace2052b4a7f825","src/task_impl/std/task_rc.rs":"a6e46e79fecb1497d603c016f4f1b14523346f74af800c9c27c069229d62dc25","src/task_impl/std/unpark_mutex.rs":"7a53b7209ff00880bce9d912c249b077870625ca87fe9ab7b0f441d3af430302","src/unsync/mod.rs":"e5da32f78212646f0161fec2e7193cda830f541bc9ae37361fbcf82e99cc1d86","src/unsync/mpsc.rs":"ef63328496eeaa6575a17525193c6093e7803df3a64355a40f0187119ca1d731","src/unsync/oneshot.rs":"89661388a87d4ac83befc31df9ad11e6a8c6104e2dde7be9e3585d7549cfe8c4","tests/all.rs":"99c6ad1d1e16ad2e0bc3027e1f5cb1a8f89404f71d77d3fc85badb67278f8179","tests/bilock.rs":"68462100c0c1e4e72f220d96ce1e6b25648f4c10a390be8
a3bbfa99bbd795f31","tests/buffer_unordered.rs":"50ceb305da08fa095ee40a8f145fa9d95db59372cca949d77f011bbabc072152","tests/channel.rs":"63d6ab1b7fd51680562f9d626a5fab9d4b81226272b5e0f9ca7faa88eae5073a","tests/eager_drop.rs":"e0a615c39f1fb9baae543212e72a165f68e7576f6b8c6db1809149d819bd546b","tests/eventual.rs":"73cbd3836a598175439b5dc5597f7e464dfbc6d77379aaae1172c6c7f85220e5","tests/fuse.rs":"feba43c51cbeeb383f6ebba4a4c75107de69a3cdb3eadb3e673fbeb5a91f9ac4","tests/future_flatten_stream.rs":"133b91a9e2170849ed7dbcb4024675873a781bf2dd190cfcaa9c41418c3ccb97","tests/futures_ordered.rs":"7835bf9bedb9322a93070b5d87886b7a333dc469aee74f7eb86a1a7914b4602c","tests/futures_unordered.rs":"048153d9c4ec3433efbb97edfe01a458762e76160624362c658432f6f2357524","tests/inspect.rs":"d7706a175be9ed6ecc09d7a45e1559160e00da85fa8a9a7caec4c53918999842","tests/mpsc-close.rs":"62c1d2acaf60e3e896471fef6a507a125b336c04781237de8dc9d13e59cfa9fc","tests/mpsc.rs":"46488138956c2293680b3282e2001a413631728638760ee0073daa5e6f75de5a","tests/oneshot.rs":"a8773b3a65e79944045118f36bfd81fceb826d4e2846b46f86db37a02d7ae1f4","tests/ready_queue.rs":"3d50c4e71e3954c5b8e2672255b6af33abaebc16172c038e64c3323d633693c0","tests/recurse.rs":"4922e1ad975dca9d6b63d155515cc24181ad6a915adcbb743f7c8a58c0148a77","tests/select_all.rs":"3666e95ea94da17abb1899101e51b294af576bc446119fbc8aea5bb2991f439a","tests/select_ok.rs":"7a740e5b2d70c7776202ed1495b016f6e63ae1de06ca0f12ab21fcb3117450a9","tests/shared.rs":"4abb7c9a7f6207e40bc7408ee405df4e5a3e778054ceb113b4a177a886a64d11","tests/sink.rs":"7da8db7fb7c4f4f259d2d520b92a121de0a750b389217c8a4a02070bd0006423","tests/split.rs":"24dd293f049a37bfaabb02ae558c81e9fef9298a2ce43ecb544450b045c15f5c","tests/stream.rs":"3ca52f06a4503a853acce77997e4e744903c2084a83e0abf1e704e4f73833805","tests/stream_catch_unwind.rs":"6cee77f455a671d038aac24cf2f79636f1c0a5d8900957a2fed0ee3ed99832b8","tests/support/local_executor.rs":"10ca7f0bc1d9fd45350a807cfd76015fe24bf68d9a711e16ea0ec6be22af9ddd","tests/support/mod.rs":"1961189f57851a468e518327da0b7893eee990e477b82a278e0015f25b5e5a1c","tests/unfold.rs":"27ff8c3c83b333094bbffe6aebadf3730f0e35d1367b7b602a3df4e233d934d8","tests/unsync-oneshot.rs":"e676b37a64e1d6c0816d55cf443d86249ec2ff8180f1fc0d009de51e6842dac8","tests/unsync.rs":"89c335c6d8764ea12bc8ae75b6df717b8c697863764353a55faf884eaeb24699"},"package":"49e7653e374fe0d0c12de4250f0bdb60680b8c80eed558c5c7538eec9c89e21b"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/futures/.travis.yml b/rustc_deps/vendor/futures/.travis.yml
new file mode 100644
index 0000000..4f007e9
--- /dev/null
+++ b/rustc_deps/vendor/futures/.travis.yml
@@ -0,0 +1,32 @@
+language: rust
+
+matrix:
+  include:
+    - os: osx
+    - rust: stable
+    - rust: beta
+    - rust: nightly
+      env: BENCH=1
+    - os: linux
+      rust: 1.15.0
+      script: cargo test
+sudo: false
+script:
+  - cargo build
+  - cargo build --no-default-features
+  - cargo test
+  - cargo test --no-default-features --features use_std
+  - cargo test --manifest-path futures-cpupool/Cargo.toml
+  - cargo test --manifest-path futures-cpupool/Cargo.toml --no-default-features
+
+  - cargo doc --no-deps
+  - cargo doc --no-deps --manifest-path futures-cpupool/Cargo.toml
+  - if [ "$BENCH" = "1" ]; then cargo bench; fi
+  - if [[ "$TRAVIS_RUST_VERSION" == nightly ]]; then cargo test --features nightly; fi
+env:
+  global:
+    - secure: "iwVcMVIF7ZSY82fK5UyyUvVvJxMSYrbZawh1+4Oi8pvOdYq1gptcDoOC8jxWwCwrNF1b+/85n+jlEUngEqqSmV5PjAbWPjoc+u4Zn7CRi1AlxoUlvHPiQm4vM4Mkkd6GsqoIZttCeedU9m/w0nQ18uUtK8uD6vr2FVdcMnUnkYQAxuGOowGLrwidukzfBXMCu/JrwKMIbt61knAFiI/KJknu0h1mRrhpeF/sQ3tJFzRRcQeFJkbfwDzltMpPo1hq5D3HI4ONjYi/qO2pwUhDk4umfp9cLW9MS8rQvptxJTQmWemHi+f2/U4ld6a0URL6kEuMkt/EbH0A74eFtlicfRs44dX9MlWoqbLypnC3ymqmHcpwcwNA3HmZyg800MTuU+BPK41HIPdO9tPpxjHEiqvNDknH7qs+YBnis0eH7DHJgEjXq651PjW7pm+rnHPwsj+OzKE1YBNxBQZZDkS3VnZJz+O4tVsOzc3IOz0e+lf7VVuI17C9haj117nKp3umC4MVBA0S8RfreFgqpyDeY2zwcqOr0YOlEGGRl0vyWP8Qcxx12kQ7+doLolt6Kxda4uO0hKRmIF6+qki1T+L7v8BOGOtCncz4f7IX48eQ7+Wu0OtglRn45qAa3CxjUuW6xX3KSNH66PCXV0Jtp8Ga2SSevX2wtbbFu9f+9R+PQY4="
+
+notifications:
+  email:
+    on_success: never
diff --git a/rustc_deps/vendor/futures/CHANGELOG.md b/rustc_deps/vendor/futures/CHANGELOG.md
new file mode 100644
index 0000000..6162823
--- /dev/null
+++ b/rustc_deps/vendor/futures/CHANGELOG.md
@@ -0,0 +1,294 @@
+**Note**: This CHANGELOG is no longer maintained for newer 0.1.x releases.
+See instead the github release tags and individual git commits.
+
+-----
+
+# 0.1.17 - 2017-10-31
+
+* Add a `close` method on `sink::Wait`
+* Undeprecate `stream::iter` as `stream::iter_result`
+* Improve performance of wait-related methods
+* Tweak buffered sinks with a 0 capacity to forward directly to the underlying
+  sink.
+* Add `FromIterator` implementation for `FuturesOrdered` and `FuturesUnordered`.
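+
+As an illustrative aside (not part of the upstream release notes), the new
+`FromIterator` impls let a plain iterator of futures be collected straight
+into the set; a minimal sketch:
+
+```rust
+extern crate futures;
+
+use futures::{future, Future, Stream};
+use futures::stream::FuturesUnordered;
+
+fn main() {
+    // Collect an iterator of already-ready futures into a FuturesUnordered,
+    // then drain the completed results as a stream.
+    let set: FuturesUnordered<_> = (0..3).map(|i| future::ok::<u32, ()>(i)).collect();
+    let mut results = set.collect().wait().unwrap();
+    results.sort();
+    assert_eq!(results, vec![0, 1, 2]);
+}
+```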
+
+# 0.1.16 - 2017-09-15
+
+* A `prelude` module has been added to glob import from and pick up a whole
+  bunch of useful types
+* `sync::mpsc::Sender::poll_ready` has been added as an API
+* `sync::mpsc::Sender::try_send` has been added as an API
+
+# 0.1.15 - 2017-08-24
+
+* Improve performance of `BiLock` methods
+* Implement `Clone` for `FutureResult`
+* Forward `Stream` trait through `SinkMapErr`
+* Add `stream::futures_ordered` next to `futures_unordered`
+* Reimplement `Stream::buffered` on top of `stream::futures_ordered` (much more
+  efficient at scale).
+* Add a `with_notify` function for abstractions which previously required
+  `UnparkEvent`.
+* Add `get_ref`/`get_mut`/`into_inner` functions for stream take/skip methods
+* Add a `Clone` implementation for `SharedItem` and `SharedError`
+* Add a `mpsc::spawn` function to spawn a `Stream` into an `Executor`
+* Add a `reunite` function for `BiLock` and the split stream/sink types to
+  rejoin two halves and reclaim the original item.
+* Add `stream::poll_fn` to behave similarly to `future::poll_fn`
+* Add `Sink::with_flat_map` like `Iterator::flat_map`
+* Bump the minimum Rust version to 1.13.0
+* Expose `AtomicTask` in the public API for managing synchronization around task
+  notifications.
+* Unify the `Canceled` type of the `sync` and `unsync` modules.
+* Deprecate the `boxed` methods. These methods have caused more confusion than
+  they've solved historically, so it's recommended to use a local extension
+  trait or a local helper instead of the trait-based methods (a small sketch
+  follows this list).
+* Deprecate the `Stream::merge` method as it's less ergonomic than `select`.
+* Add `oneshot::Sender::is_canceled` to test if a oneshot is canceled off a
+  task.
+* Deprecates `UnboundedSender::send` in favor of a method named `unbounded_send`
+  to avoid a conflict with `Sink::send`.
+* Deprecate the `stream::iter` function in favor of a `stream::iter_ok` adaptor
+  to avoid the need to deal with `Result` manually.
+* Add an `inspect` function to the `Future` and `Stream` traits along the lines
+  of `Iterator::inspect`
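+
+A minimal sketch of the "local extension trait" approach mentioned above (the
+`IntoBoxedFuture` name is made up for illustration; it is not an upstream API):
+
+```rust
+extern crate futures;
+
+use futures::{future, Future};
+
+// Local stand-in for the deprecated `boxed` method, kept in the downstream crate.
+trait IntoBoxedFuture: Future + Sized + Send + 'static {
+    fn into_box(self) -> Box<Future<Item = Self::Item, Error = Self::Error> + Send> {
+        Box::new(self)
+    }
+}
+
+impl<F> IntoBoxedFuture for F where F: Future + Send + 'static {}
+
+fn main() {
+    let fut = future::ok::<u32, ()>(1).into_box();
+    assert_eq!(fut.wait(), Ok(1));
+}
+```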
+
+# 0.1.14 - 2017-05-30
+
+This is a relatively large release of the `futures` crate, although much of it
+is from reworking internals rather than new APIs. The banner feature of this
+release is that the `futures::{task, executor}` modules are now available in
+`no_std` contexts! A large refactoring of the task system was performed in
+PR #436 to accommodate custom memory allocation schemes and otherwise remove
+all dependencies on `std` for the task module. More details about this change
+can be found on the PR itself.
+
+Other API additions in this release are:
+
+* A `FuturesUnordered::push` method was added and the `FuturesUnordered` type
+  itself was completely rewritten to efficiently track a large number of
+  futures.
+* A `Task::will_notify_current` method was added with a slightly different
+  implementation than `Task::is_current` but with stronger guarantees and
+  documentation wording about its purpose.
+* Many combinators now have `get_ref`, `get_mut`, and `into_inner` methods for
+  accessing internal futures and state.
+* A `Stream::concat2` method was added which should be considered the "fixed"
+  version of `concat`; this one doesn't panic on empty streams.
+* An `Executor` trait has been added to represent abstracting over the concept
+  of spawning a new task. Crates which only need the ability to spawn a future
+  can now be generic over `Executor` rather than requiring a
+  `tokio_core::reactor::Handle`.
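+
+As a hedged sketch (assuming the `futures::future::{Executor, ExecuteError}`
+items shipped in this 0.1 line), a trivial executor that just runs futures
+inline might look like this; `Inline` is a made-up name:
+
+```rust
+extern crate futures;
+
+use futures::future::{self, ExecuteError, Executor, Future};
+
+// Toy executor: drives each spawned future to completion on the calling
+// thread. A real executor would hand the future off to a task queue instead.
+struct Inline;
+
+impl<F> Executor<F> for Inline
+    where F: Future<Item = (), Error = ()>,
+{
+    fn execute(&self, future: F) -> Result<(), ExecuteError<F>> {
+        let _ = future.wait();
+        Ok(())
+    }
+}
+
+fn main() {
+    assert!(Inline.execute(future::ok::<(), ()>(())).is_ok());
+}
+```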
+
+As with all 0.1.x releases this PR is intended to be 100% backwards compatible.
+All code that previously compiled should continue to do so with these changes.
+As with other changes, though, there are also some updates to be aware of:
+
+* The `task::park` function has been renamed to `task::current`.
+* The `Task::unpark` function has been renamed to `Task::notify`, and in general
+  terminology around "unpark" has shifted to terminology around "notify"
+* The `Unpark` trait has been deprecated in favor of the `Notify` trait
+  mentioned above.
+* The `UnparkEvent` structure has been deprecated. It currently should perform
+  the same as it used to, but it's planned that in a future 0.1.x release the
+  performance will regress for crates that have not transitioned away. The
+  primary primitive to replace this is the addition of a `push` function on the
+  `FuturesUnordered` type. If this does not help implement your use case though,
+  please let us know!
+* The `Task::is_current` method is now deprecated, and you likely want to use
+  `Task::will_notify_current` instead, but let us know if this doesn't suffice!
+
+# 0.1.13 - 2017-04-05
+
+* Add forwarding sink/stream impls for `stream::FromErr` and `sink::SinkFromErr`
+* Add `PartialEq` and `Eq` to `mpsc::SendError`
+* Reimplement `Shared` with `spawn` instead of `UnparkEvent`
+
+# 0.1.12 - 2017-04-03
+
+* Add `Stream::from_err` and `Sink::from_err`
+* Allow `SendError` to be `Clone` when possible
+
+# 0.1.11 - 2017-03-13
+
+The major highlight of this release is the addition of a new "default" method on
+the `Sink` trait, `Sink::close`. This method is used to indicate to a sink that
+no new values will ever need to get pushed into it. This can be used to
+implement graceful shutdown of protocols and otherwise simply indicates to a
+sink that it can start freeing up resources.
+
+Currently this method is **not** a default method to preserve backwards
+compatibility, but it's intended to become a default method in the 0.2 series of
+the `futures` crate. It's highly recommended to audit implementations of `Sink`
+to implement the `close` method as they see fit.
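+
+A minimal, illustrative implementation (not from the upstream docs) of a sink
+that tracks `close`; `VecSink` is a made-up type:
+
+```rust
+extern crate futures;
+
+use futures::{Async, AsyncSink, Poll, Sink, StartSend};
+
+// Toy sink that buffers items in memory and refuses new ones once closed.
+struct VecSink {
+    items: Vec<u32>,
+    closed: bool,
+}
+
+impl Sink for VecSink {
+    type SinkItem = u32;
+    type SinkError = ();
+
+    fn start_send(&mut self, item: u32) -> StartSend<u32, ()> {
+        assert!(!self.closed, "start_send after close");
+        self.items.push(item);
+        Ok(AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), ()> {
+        // Nothing is buffered asynchronously, so flushing always completes.
+        Ok(Async::Ready(()))
+    }
+
+    fn close(&mut self) -> Poll<(), ()> {
+        // Signal shutdown; a real sink would flush pending data first.
+        self.closed = true;
+        Ok(Async::Ready(()))
+    }
+}
+
+fn main() {
+    let mut sink = VecSink { items: Vec::new(), closed: false };
+    assert!(sink.start_send(1).is_ok());
+    assert!(sink.close().is_ok());
+    assert_eq!(sink.items, vec![1]);
+}
+```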
+
+Other changes in this release are:
+
+* A new select combinator, `Future::select2` was added for a heterogeneous
+  select.
+* A `Shared::peek` method was added to check to see if it's done.
+* `Sink::map_err` was implemented
+* The `log` dependency was removed
+* Implementations of the `Debug` trait are now generally available.
+* The `stream::IterStream` type was renamed to `stream::Iter` (with a reexport
+  for the old name).
+* Add a `Sink::wait` method which returns an adapter to use an arbitrary `Sink`
+  synchronously.
+* A `Stream::concat` method was added to concatenate a sequence of lists.
+* The `oneshot::Sender::complete` method was renamed to `send` and now returns a
+  `Result` indicating successful transmission of a message or not. Note that the
+  `complete` method still exists; it's just deprecated.
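+
+A quick sketch of the renamed API (illustrative only): `send` reports whether
+the receiving end is still alive.
+
+```rust
+extern crate futures;
+
+use futures::Future;
+use futures::sync::oneshot;
+
+fn main() {
+    let (tx, rx) = oneshot::channel::<&'static str>();
+    // `send` (formerly `complete`) hands the value back in `Err` if the
+    // receiver has already been dropped.
+    tx.send("hello").expect("receiver was dropped");
+    assert_eq!(rx.wait().unwrap(), "hello");
+}
+```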
+
+# 0.1.10 - 2017-01-30
+
+* Add a new `unsync` module which mirrors `sync` to the extent that it can but
+  is intended to not perform cross-thread synchronization (only usable within
+  one thread).
+* Tweak `Shared` to work when handles may not get poll'd again.
+
+# 0.1.9 - 2017-01-18
+
+* Fix `Send/Sync` of a few types
+* Add `future::tail_fn` for more easily writing loops
+* Export SharedItem/SharedError
+* Remove an unused type parameter in `from_err`
+
+# 0.1.8 - 2017-01-11
+
+* Fix some race conditions in the `Shared` implementation
+* Add `Stream::take_while`
+* Fix an unwrap in `stream::futures_unordered`
+* Generalize `Stream::for_each`
+* Add `Stream::chain`
+* Add `stream::repeat`
+* Relax `&mut self` to `&self` in `UnboundedSender::send`
+
+# 0.1.7 - 2016-12-18
+
+* Add a `Future::shared` method for creating a future that can be shared
+  amongst threads by cloning the future itself. All derivative futures
+  will resolve to the same value once the original future has been
+  resolved.
+* Add a `FutureFrom` trait for future-based conversion
+* Fix a wakeup bug in `Receiver::close`
+* Add `future::poll_fn` for quickly adapting a `Poll`-based function to
+  a future (a small sketch follows this list).
+* Add an `Either` enum with two branches to easily create one future
+  type based on two different futures created on two branches of control
+  flow.
+* Remove the `'static` bound on `Unpark`
+* Optimize `send_all` and `forward` to send as many items as possible
+  before calling `poll_complete`.
+* Unify the return types of the `ok`, `err`, and `result` futures to
+  assist returning different varieties in different branches of a function.
+* Add `CpuFuture::forget` to allow the computation to continue running
+  after a drop.
+* Add a `stream::futures_unordered` combinator to turn a list of futures
+  into a stream representing their order of completion.
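+
+A minimal, illustrative `poll_fn` sketch (not part of the upstream notes):
+
+```rust
+extern crate futures;
+
+use futures::{future, Async, Future, Poll};
+
+fn main() {
+    // Wrap a closure returning `Poll` into a future that is immediately ready.
+    let fut = future::poll_fn(|| -> Poll<u32, ()> { Ok(Async::Ready(7)) });
+    assert_eq!(fut.wait(), Ok(7));
+}
+```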
+
+# 0.1.6 - 2016-11-22
+
+* Fix `Clone` bound on the type parameter on `UnboundedSender`
+
+# 0.1.5 - 2016-11-22
+
+* Fix `#![no_std]` support
+
+# 0.1.4 - 2016-11-22
+
+This is quite a large release relative to the previous point releases! As
+with all 0.1 releases, this release should be fully compatible with the 0.1.3
+release. If any incompatibilities are discovered please file an issue!
+
+The largest changes in 0.1.4 are the addition of a `Sink` trait coupled with a
+reorganization of this crate. Note that all old locations for types/traits
+still exist, they're just deprecated and tagged with `#[doc(hidden)]`.
+
+The new `Sink` trait is used to represent types which can accept items over
+time, but may take some time to fully process each item before
+another can be accepted. Essentially, a sink is the opposite of a stream. This
+trait will then be used in the tokio-core crate to implement simple framing by
+modeling I/O streams as both a stream and a sink of frames.
+
+The organization of this crate is to now have three primary submodules,
+`future`, `stream`, and `sink`. The traits as well as all combinator types are
+defined in these submodules. The traits and types like `Async` and `Poll` are
+then reexported at the top of the crate for convenient usage. It should be a
+relatively rare occasion that the modules themselves are reached into.
+
+Finally, the 0.1.4 release comes with a new module, `sync`, in the futures
+crate.  This is intended to be the home of a suite of futures-aware
+synchronization primitives. Currently this is inhabited with a `oneshot` module
+(the old `oneshot` function), a `mpsc` module for a new multi-producer
+single-consumer channel, and a `BiLock` type which represents sharing ownership
+of one value between two consumers. This module may expand over time with more
+types like a mutex, rwlock, spsc channel, etc.
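+
+As an illustrative sketch (not from the upstream notes) of the futures-aware
+`sync::mpsc` channel described above:
+
+```rust
+extern crate futures;
+
+use futures::{Future, Sink, Stream};
+use futures::sync::mpsc;
+
+fn main() {
+    // Bounded channel; `1` is the buffer size.
+    let (tx, rx) = mpsc::channel::<u32>(1);
+    let tx = tx.send(7).wait().expect("receiver is alive"); // Sink::send
+    drop(tx); // closing the sender lets the receiving stream terminate
+    let received: Vec<u32> = rx.collect().wait().unwrap();
+    assert_eq!(received, vec![7]);
+}
+```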
+
+Notable deprecations in the 0.1.4 release that will be deleted in an eventual
+0.2 release:
+
+* The `TaskRc` type is now deprecated in favor of `BiLock` or otherwise `Arc`
+  sharing.
+* All future combinators should be accessed through the `future` module, not
+  the top-level of the crate.
+* The `Oneshot` and `Complete` types are now replaced with the `sync::oneshot`
+  module.
+* Some old names like `collect` are deprecated in favor of more appropriately
+  named versions like `join_all`
+* The `finished` constructor is now `ok`.
+* The `failed` constructor is now `err`.
+* The `done` constructor is now `result`.
+
+As always, please report bugs to https://github.com/rust-lang-nursery/futures-rs and
+we always love feedback! If you've got situations we don't cover, combinators
+you'd like to see, or slow code, please let us know!
+
+Full changelog:
+
+* Improve scalability of `buffer_unordered` combinator
+* Fix a memory ordering bug in oneshot
+* Add a new trait, `Sink`
+* Reorganize the crate into three primary modules
+* Add a new `sync` module for synchronization primitives
+* Add a `BiLock` sync primitive for two-way sharing
+* Deprecate `TaskRc`
+* Rename `collect` to `join_all`
+* Use a small vec in `Events` for improved clone performance
+* Add `Stream::select` for selecting items from two streams like `merge` but
+  requiring the same types.
+* Add `stream::unfold` constructor
+* Add a `sync::mpsc` module with a futures-aware multi-producer single-consumer
+  queue. Both bounded (with backpressure) and unbounded (no backpressure)
+  variants are provided.
+* Renamed `failed`, `finished`, and `done` combinators to `err`, `ok`, and
+  `result`.
+* Add `Stream::forward` to send all items to a sink, like `Sink::send_all`
+* Add `Stream::split` for streams which are both sinks and streams to have
+  separate ownership of the stream/sink halves
+* Improve `join_all` with concurrency
+
+# 0.1.3 - 2016-10-24
+
+* Rewrite `oneshot` for efficiency and removing allocations on send/recv
+* Errors are passed through in `Stream::take` and `Stream::skip`
+* Add a `select_ok` combinator to pick the first of a list that succeeds
+* Remove the unnecessary `SelectAllNext` typedef
+* Add `Stream::chunks` for receiving chunks of data
+* Rewrite `stream::channel` for efficiency, correctness, and removing
+  allocations
+* Remove `Send + 'static` bounds on the `stream::Empty` type
+
+# 0.1.2 - 2016-10-04
+
+* Fixed a bug in drop of `FutureSender`
+* Expose the channel `SendError` type
+* Add `Future::into_stream` to convert to a single-element stream
+* Add `Future::flatten_to_stream` to convert a future of a stream to a stream
+* impl Debug for SendError
+* Add stream::once for a one element stream
+* Accept IntoIterator in stream::iter
+* Add `Stream::catch_unwind`
+
+# 0.1.1 - 2016-09-09
+
+Initial release!
diff --git a/rustc_deps/vendor/futures/Cargo.toml b/rustc_deps/vendor/futures/Cargo.toml
new file mode 100644
index 0000000..3c295a4
--- /dev/null
+++ b/rustc_deps/vendor/futures/Cargo.toml
@@ -0,0 +1,37 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "futures"
+version = "0.1.25"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+description = "An implementation of futures and streams featuring zero allocations,\ncomposability, and iterator-like interfaces.\n"
+homepage = "https://github.com/rust-lang-nursery/futures-rs"
+documentation = "https://docs.rs/futures"
+readme = "README.md"
+keywords = ["futures", "async", "future"]
+categories = ["asynchronous"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-lang-nursery/futures-rs"
+
+[dependencies]
+
+[features]
+default = ["use_std", "with-deprecated"]
+nightly = []
+use_std = []
+with-deprecated = []
+[badges.appveyor]
+repository = "rust-lang-nursery/futures-rs"
+
+[badges.travis-ci]
+repository = "rust-lang-nursery/futures-rs"
diff --git a/rustc_deps/vendor/futures/LICENSE-APACHE b/rustc_deps/vendor/futures/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rustc_deps/vendor/futures/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rustc_deps/vendor/futures/LICENSE-MIT b/rustc_deps/vendor/futures/LICENSE-MIT
new file mode 100644
index 0000000..28e630c
--- /dev/null
+++ b/rustc_deps/vendor/futures/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 Alex Crichton
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rustc_deps/vendor/futures/README.md b/rustc_deps/vendor/futures/README.md
new file mode 100644
index 0000000..aeb3fd4
--- /dev/null
+++ b/rustc_deps/vendor/futures/README.md
@@ -0,0 +1,60 @@
+# futures-rs
+
+This library is an implementation of **zero-cost futures** in Rust.
+
+[![Build Status](https://travis-ci.org/rust-lang-nursery/futures-rs.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/futures-rs)
+[![Build status](https://ci.appveyor.com/api/projects/status/yl5w3ittk4kggfsh?svg=true)](https://ci.appveyor.com/project/rust-lang-nursery/futures-rs)
+[![Crates.io](https://img.shields.io/crates/v/futures.svg?maxAge=2592000)](https://crates.io/crates/futures)
+
+[Documentation](https://docs.rs/futures)
+
+[Tutorial](https://tokio.rs/docs/getting-started/futures/)
+
+## Usage
+
+First, add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures = "0.1.17"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate futures;
+
+use futures::Future;
+```
+
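+From there, futures are typically built up with combinators; as a quick,
+hedged illustration (not from the upstream README):
+
+```rust
+extern crate futures;
+
+use futures::future::{self, Future};
+
+fn main() {
+    // Chain two computations and drive the result synchronously with `wait`.
+    let answer = future::ok::<u32, ()>(20)
+        .map(|x| x * 2)
+        .and_then(|x| future::ok(x + 2))
+        .wait()
+        .unwrap();
+    assert_eq!(answer, 42);
+}
+```
+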
+For more information about how you can use futures with async I/O you can take a
+look at [https://tokio.rs](https://tokio.rs) which is an introduction to both
+the Tokio stack and also futures.
+
+### Feature `use_std`
+
+`futures-rs` works without the standard library, such as in bare metal environments.
+However, it has a significantly reduced API surface. To use `futures-rs` in
+a `#[no_std]` environment, use:
+
+```toml
+[dependencies]
+futures = { version = "0.1.17", default-features = false }
+```
+
+# License
+
+This project is licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+   http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+   http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in Futures by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/rustc_deps/vendor/futures/appveyor.yml b/rustc_deps/vendor/futures/appveyor.yml
new file mode 100644
index 0000000..b516f60
--- /dev/null
+++ b/rustc_deps/vendor/futures/appveyor.yml
@@ -0,0 +1,39 @@
+environment:
+
+  # At the time this was added AppVeyor was having troubles with checking
+  # revocation of SSL certificates of sites like static.rust-lang.org and what
+  # we think is crates.io. The libcurl HTTP client by default checks for
+  # revocation on Windows and according to a mailing list [1] this can be
+  # disabled.
+  #
+  # The `CARGO_HTTP_CHECK_REVOKE` env var here tells cargo to disable SSL
+  # revocation checking on Windows in libcurl. Note, though, that rustup, which
+  # we're using to download Rust here, also uses libcurl as the default backend.
+  # Unlike Cargo, however, rustup doesn't have a mechanism to disable revocation
+  # checking. To get rustup working we set `RUSTUP_USE_HYPER` which forces it to
+  # use the Hyper instead of libcurl backend. Both Hyper and libcurl use
+  # schannel on Windows but it appears that Hyper configures it slightly
+  # differently such that revocation checking isn't turned on by default.
+  #
+  # [1]: https://curl.haxx.se/mail/lib-2016-03/0202.html
+  RUSTUP_USE_HYPER: 1
+  CARGO_HTTP_CHECK_REVOKE: false
+
+  matrix:
+  - TARGET: x86_64-pc-windows-msvc
+install:
+  - set PATH=C:\Program Files\Git\mingw64\bin;%PATH%
+  - curl -sSf -o rustup-init.exe https://win.rustup.rs/
+  - rustup-init.exe -y --default-host %TARGET%
+  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
+  - rustc -V
+  - cargo -V
+
+build: false
+
+test_script:
+  - cargo build
+  - cargo build --no-default-features
+  - cargo test
+  - cargo test --no-default-features --features use_std
+  - cargo test --manifest-path futures-cpupool/Cargo.toml
diff --git a/rustc_deps/vendor/futures/benches/bilock.rs b/rustc_deps/vendor/futures/benches/bilock.rs
new file mode 100644
index 0000000..0f84028
--- /dev/null
+++ b/rustc_deps/vendor/futures/benches/bilock.rs
@@ -0,0 +1,121 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::{Async, Poll};
+use futures::executor;
+use futures::executor::{Notify, NotifyHandle};
+use futures::sync::BiLock;
+use futures::sync::BiLockAcquire;
+use futures::sync::BiLockAcquired;
+use futures::future::Future;
+use futures::stream::Stream;
+
+
+use test::Bencher;
+
+fn notify_noop() -> NotifyHandle {
+    struct Noop;
+
+    impl Notify for Noop {
+        fn notify(&self, _id: usize) {}
+    }
+
+    const NOOP : &'static Noop = &Noop;
+
+    NotifyHandle::from(NOOP)
+}
+
+
+/// Pseudo-stream which simply calls `lock.poll()` on `poll`
+struct LockStream {
+    lock: BiLockAcquire<u32>,
+}
+
+impl LockStream {
+    fn new(lock: BiLock<u32>) -> LockStream {
+        LockStream {
+            lock: lock.lock()
+        }
+    }
+
+    /// Release a lock after it was acquired in `poll`,
+    /// so `poll` could be called again.
+    fn release_lock(&mut self, guard: BiLockAcquired<u32>) {
+        self.lock = guard.unlock().lock()
+    }
+}
+
+impl Stream for LockStream {
+    type Item = BiLockAcquired<u32>;
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        self.lock.poll().map(|a| match a {
+            Async::Ready(a) => Async::Ready(Some(a)),
+            Async::NotReady => Async::NotReady,
+        })
+    }
+}
+
+
+#[bench]
+fn contended(b: &mut Bencher) {
+    b.iter(|| {
+        let (x, y) = BiLock::new(1);
+
+        let mut x = executor::spawn(LockStream::new(x));
+        let mut y = executor::spawn(LockStream::new(y));
+
+        for _ in 0..1000 {
+            let x_guard = match x.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::Ready(Some(guard))) => guard,
+                _ => panic!(),
+            };
+
+            // Try polling the second lock while the first guard is still held
+            match y.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::NotReady) => (),
+                _ => panic!(),
+            };
+
+            x.get_mut().release_lock(x_guard);
+
+            let y_guard = match y.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::Ready(Some(guard))) => guard,
+                _ => panic!(),
+            };
+
+            y.get_mut().release_lock(y_guard);
+        }
+        (x, y)
+    });
+}
+
+#[bench]
+fn lock_unlock(b: &mut Bencher) {
+    b.iter(|| {
+        let (x, y) = BiLock::new(1);
+
+        let mut x = executor::spawn(LockStream::new(x));
+        let mut y = executor::spawn(LockStream::new(y));
+
+        for _ in 0..1000 {
+            let x_guard = match x.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::Ready(Some(guard))) => guard,
+                _ => panic!(),
+            };
+
+            x.get_mut().release_lock(x_guard);
+
+            let y_guard = match y.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::Ready(Some(guard))) => guard,
+                _ => panic!(),
+            };
+
+            y.get_mut().release_lock(y_guard);
+        }
+        (x, y)
+    })
+}
diff --git a/rustc_deps/vendor/futures/benches/futures_unordered.rs b/rustc_deps/vendor/futures/benches/futures_unordered.rs
new file mode 100644
index 0000000..c922df5
--- /dev/null
+++ b/rustc_deps/vendor/futures/benches/futures_unordered.rs
@@ -0,0 +1,43 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::*;
+use futures::stream::FuturesUnordered;
+use futures::sync::oneshot;
+
+use test::Bencher;
+
+use std::collections::VecDeque;
+use std::thread;
+
+#[bench]
+fn oneshots(b: &mut Bencher) {
+    const NUM: usize = 10_000;
+
+    b.iter(|| {
+        let mut txs = VecDeque::with_capacity(NUM);
+        let mut rxs = FuturesUnordered::new();
+
+        for _ in 0..NUM {
+            let (tx, rx) = oneshot::channel();
+            txs.push_back(tx);
+            rxs.push(rx);
+        }
+
+        thread::spawn(move || {
+            while let Some(tx) = txs.pop_front() {
+                let _ = tx.send("hello");
+            }
+        });
+
+        future::lazy(move || {
+            loop {
+                if let Ok(Async::Ready(None)) = rxs.poll() {
+                    return Ok::<(), ()>(());
+                }
+            }
+        }).wait().unwrap();
+    });
+}
diff --git a/rustc_deps/vendor/futures/benches/poll.rs b/rustc_deps/vendor/futures/benches/poll.rs
new file mode 100644
index 0000000..1fec653
--- /dev/null
+++ b/rustc_deps/vendor/futures/benches/poll.rs
@@ -0,0 +1,72 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::*;
+use futures::executor::{Notify, NotifyHandle};
+use futures::task::Task;
+
+use test::Bencher;
+
+fn notify_noop() -> NotifyHandle {
+    struct Noop;
+
+    impl Notify for Noop {
+        fn notify(&self, _id: usize) {}
+    }
+
+    const NOOP : &'static Noop = &Noop;
+
+    NotifyHandle::from(NOOP)
+}
+
+#[bench]
+fn task_init(b: &mut Bencher) {
+    const NUM: u32 = 100_000;
+
+    struct MyFuture {
+        num: u32,
+        task: Option<Task>,
+    };
+
+    impl Future for MyFuture {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            if self.num == NUM {
+                Ok(Async::Ready(()))
+            } else {
+                self.num += 1;
+
+                if let Some(ref t) = self.task {
+                    if t.will_notify_current() {
+                        t.notify();
+                        return Ok(Async::NotReady);
+                    }
+                }
+
+                let t = task::current();
+                t.notify();
+                self.task = Some(t);
+
+                Ok(Async::NotReady)
+            }
+        }
+    }
+
+    let notify = notify_noop();
+
+    let mut fut = executor::spawn(MyFuture {
+        num: 0,
+        task: None,
+    });
+
+    b.iter(|| {
+        fut.get_mut().num = 0;
+
+        while let Ok(Async::NotReady) = fut.poll_future_notify(&notify, 0) {
+        }
+    });
+}
diff --git a/rustc_deps/vendor/futures/benches/sync_mpsc.rs b/rustc_deps/vendor/futures/benches/sync_mpsc.rs
new file mode 100644
index 0000000..c0365c5
--- /dev/null
+++ b/rustc_deps/vendor/futures/benches/sync_mpsc.rs
@@ -0,0 +1,168 @@
+#![feature(test)]
+
+#[macro_use]
+extern crate futures;
+extern crate test;
+
+use futures::{Async, Poll, AsyncSink};
+use futures::executor;
+use futures::executor::{Notify, NotifyHandle};
+
+use futures::sink::Sink;
+use futures::stream::Stream;
+
+use futures::sync::mpsc::unbounded;
+use futures::sync::mpsc::channel;
+use futures::sync::mpsc::Sender;
+use futures::sync::mpsc::UnboundedSender;
+
+
+use test::Bencher;
+
+fn notify_noop() -> NotifyHandle {
+    struct Noop;
+
+    impl Notify for Noop {
+        fn notify(&self, _id: usize) {}
+    }
+
+    const NOOP : &'static Noop = &Noop;
+
+    NotifyHandle::from(NOOP)
+}
+
+/// Single producer, single consumer
+#[bench]
+fn unbounded_1_tx(b: &mut Bencher) {
+    b.iter(|| {
+        let (tx, rx) = unbounded();
+
+        let mut rx = executor::spawn(rx);
+
+        // 1000 iterations to avoid measuring overhead of initialization
+        // Result should be divided by 1000
+        for i in 0..1000 {
+
+            // Poll, not ready, park
+            assert_eq!(Ok(Async::NotReady), rx.poll_stream_notify(&notify_noop(), 1));
+
+            UnboundedSender::unbounded_send(&tx, i).unwrap();
+
+            // Now poll ready
+            assert_eq!(Ok(Async::Ready(Some(i))), rx.poll_stream_notify(&notify_noop(), 1));
+        }
+    })
+}
+
+/// 100 producers, single consumer
+#[bench]
+fn unbounded_100_tx(b: &mut Bencher) {
+    b.iter(|| {
+        let (tx, rx) = unbounded();
+
+        let mut rx = executor::spawn(rx);
+
+        let tx: Vec<_> = (0..100).map(|_| tx.clone()).collect();
+
+        // 1000 send/recv operations total, result should be divided by 1000
+        for _ in 0..10 {
+            for i in 0..tx.len() {
+                assert_eq!(Ok(Async::NotReady), rx.poll_stream_notify(&notify_noop(), 1));
+
+                UnboundedSender::unbounded_send(&tx[i], i).unwrap();
+
+                assert_eq!(Ok(Async::Ready(Some(i))), rx.poll_stream_notify(&notify_noop(), 1));
+            }
+        }
+    })
+}
+
+#[bench]
+fn unbounded_uncontended(b: &mut Bencher) {
+    b.iter(|| {
+        let (tx, mut rx) = unbounded();
+
+        for i in 0..1000 {
+            UnboundedSender::unbounded_send(&tx, i).expect("send");
+            // No need to create a task, because poll is not going to park.
+            assert_eq!(Ok(Async::Ready(Some(i))), rx.poll());
+        }
+    })
+}
+
+
+/// A Stream that continuously sends incrementing numbers into the queue
+struct TestSender {
+    tx: Sender<u32>,
+    last: u32, // Last number sent
+}
+
+// Could be a Future, it doesn't matter
+impl Stream for TestSender {
+    type Item = u32;
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        match self.tx.start_send(self.last + 1) {
+            Err(_) => panic!(),
+            Ok(AsyncSink::Ready) => {
+                self.last += 1;
+                Ok(Async::Ready(Some(self.last)))
+            }
+            Ok(AsyncSink::NotReady(_)) => {
+                Ok(Async::NotReady)
+            }
+        }
+    }
+}
+
+
+/// Single producers, single consumer
+#[bench]
+fn bounded_1_tx(b: &mut Bencher) {
+    b.iter(|| {
+        let (tx, rx) = channel(0);
+
+        let mut tx = executor::spawn(TestSender {
+            tx: tx,
+            last: 0,
+        });
+
+        let mut rx = executor::spawn(rx);
+
+        for i in 0..1000 {
+            assert_eq!(Ok(Async::Ready(Some(i + 1))), tx.poll_stream_notify(&notify_noop(), 1));
+            assert_eq!(Ok(Async::NotReady), tx.poll_stream_notify(&notify_noop(), 1));
+            assert_eq!(Ok(Async::Ready(Some(i + 1))), rx.poll_stream_notify(&notify_noop(), 1));
+        }
+    })
+}
+
+/// 100 producers, single consumer
+#[bench]
+fn bounded_100_tx(b: &mut Bencher) {
+    b.iter(|| {
+        // Each sender can send one item after specified capacity
+        let (tx, rx) = channel(0);
+
+        let mut tx: Vec<_> = (0..100).map(|_| {
+            executor::spawn(TestSender {
+                tx: tx.clone(),
+                last: 0
+            })
+        }).collect();
+
+        let mut rx = executor::spawn(rx);
+
+        for i in 0..10 {
+            for j in 0..tx.len() {
+                // Send an item
+                assert_eq!(Ok(Async::Ready(Some(i + 1))), tx[j].poll_stream_notify(&notify_noop(), 1));
+                // Then block
+                assert_eq!(Ok(Async::NotReady), tx[j].poll_stream_notify(&notify_noop(), 1));
+                // Recv the item
+                assert_eq!(Ok(Async::Ready(Some(i + 1))), rx.poll_stream_notify(&notify_noop(), 1));
+            }
+        }
+    })
+}
diff --git a/rustc_deps/vendor/futures/benches/thread_notify.rs b/rustc_deps/vendor/futures/benches/thread_notify.rs
new file mode 100644
index 0000000..9293235
--- /dev/null
+++ b/rustc_deps/vendor/futures/benches/thread_notify.rs
@@ -0,0 +1,114 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::{Future, Poll, Async};
+use futures::task::{self, Task};
+
+use test::Bencher;
+
+#[bench]
+fn thread_yield_single_thread_one_wait(b: &mut Bencher) {
+    const NUM: usize = 10_000;
+
+    struct Yield {
+        rem: usize,
+    }
+
+    impl Future for Yield {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            if self.rem == 0 {
+                Ok(Async::Ready(()))
+            } else {
+                self.rem -= 1;
+                task::current().notify();
+                Ok(Async::NotReady)
+            }
+        }
+    }
+
+    b.iter(|| {
+        let y = Yield { rem: NUM };
+        y.wait().unwrap();
+    });
+}
+
+#[bench]
+fn thread_yield_single_thread_many_wait(b: &mut Bencher) {
+    const NUM: usize = 10_000;
+
+    struct Yield {
+        rem: usize,
+    }
+
+    impl Future for Yield {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            if self.rem == 0 {
+                Ok(Async::Ready(()))
+            } else {
+                self.rem -= 1;
+                task::current().notify();
+                Ok(Async::NotReady)
+            }
+        }
+    }
+
+    b.iter(|| {
+        for _ in 0..NUM {
+            let y = Yield { rem: 1 };
+            y.wait().unwrap();
+        }
+    });
+}
+
+#[bench]
+fn thread_yield_multi_thread(b: &mut Bencher) {
+    use std::sync::mpsc;
+    use std::thread;
+
+    const NUM: usize = 1_000;
+
+    let (tx, rx) = mpsc::sync_channel::<Task>(10_000);
+
+    struct Yield {
+        rem: usize,
+        tx: mpsc::SyncSender<Task>,
+    }
+
+    impl Future for Yield {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            if self.rem == 0 {
+                Ok(Async::Ready(()))
+            } else {
+                self.rem -= 1;
+                self.tx.send(task::current()).unwrap();
+                Ok(Async::NotReady)
+            }
+        }
+    }
+
+    thread::spawn(move || {
+        while let Ok(task) = rx.recv() {
+            task.notify();
+        }
+    });
+
+    b.iter(move || {
+        let y = Yield {
+            rem: NUM,
+            tx: tx.clone(),
+        };
+
+        y.wait().unwrap();
+    });
+}
diff --git a/rustc_deps/vendor/futures/src/executor.rs b/rustc_deps/vendor/futures/src/executor.rs
new file mode 100644
index 0000000..365642f
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/executor.rs
@@ -0,0 +1,17 @@
+//! Executors
+//!
+//! This module contains tools for managing the raw execution of futures,
+//! which is needed when building *executors* (places where futures can run).
+//!
+//! More information about executors can be [found online at tokio.rs][online].
+//!
+//! [online]: https://tokio.rs/docs/going-deeper-futures/tasks/
+
+#[allow(deprecated)]
+#[doc(hidden)]
+#[cfg(feature = "use_std")]
+pub use task_impl::{Unpark, Executor, Run};
+
+pub use task_impl::{Spawn, spawn, Notify, with_notify};
+
+pub use task_impl::{UnsafeNotify, NotifyHandle};
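
A minimal sketch of how the re-exported `spawn`/`Spawn` pair is typically used (assuming the default `use_std` feature; the blocking `wait_future` call is for illustration only):

extern crate futures;

use futures::{executor, future};

fn main() {
    // `executor::spawn` wraps a future in a `Spawn`, which can then be driven
    // to completion on the current thread with `wait_future`.
    let mut task = executor::spawn(future::ok::<u32, ()>(42));
    assert_eq!(task.wait_future(), Ok(42));
}
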
diff --git a/rustc_deps/vendor/futures/src/future/and_then.rs b/rustc_deps/vendor/futures/src/future/and_then.rs
new file mode 100644
index 0000000..2e5b6aa
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/and_then.rs
@@ -0,0 +1,38 @@
+use {Future, IntoFuture, Poll};
+use super::chain::Chain;
+
+/// Future for the `and_then` combinator, chaining a computation onto the end of
+/// another future which completes successfully.
+///
+/// This is created by the `Future::and_then` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct AndThen<A, B, F> where A: Future, B: IntoFuture {
+    state: Chain<A, B::Future, F>,
+}
+
+pub fn new<A, B, F>(future: A, f: F) -> AndThen<A, B, F>
+    where A: Future,
+          B: IntoFuture,
+{
+    AndThen {
+        state: Chain::new(future, f),
+    }
+}
+
+impl<A, B, F> Future for AndThen<A, B, F>
+    where A: Future,
+          B: IntoFuture<Error=A::Error>,
+          F: FnOnce(A::Item) -> B,
+{
+    type Item = B::Item;
+    type Error = B::Error;
+
+    fn poll(&mut self) -> Poll<B::Item, B::Error> {
+        self.state.poll(|result, f| {
+            result.map(|e| {
+                Err(f(e).into_future())
+            })
+        })
+    }
+}
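
A rough usage sketch for the `and_then` combinator vendored above (the blocking `wait` is for illustration only):

extern crate futures;

use futures::{future, Future};

fn main() {
    // `Result` implements `IntoFuture`, so the closure can return `Ok`
    // directly; the error types of both stages must match (here `u32`).
    let f = future::ok::<u32, u32>(1).and_then(|x| Ok(x + 3));
    assert_eq!(f.wait(), Ok(4));
}
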
diff --git a/rustc_deps/vendor/futures/src/future/catch_unwind.rs b/rustc_deps/vendor/futures/src/future/catch_unwind.rs
new file mode 100644
index 0000000..f87f118
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/catch_unwind.rs
@@ -0,0 +1,51 @@
+use std::prelude::v1::*;
+use std::any::Any;
+use std::panic::{catch_unwind, UnwindSafe, AssertUnwindSafe};
+
+use {Future, Poll, Async};
+
+/// Future for the `catch_unwind` combinator.
+///
+/// This is created by the `Future::catch_unwind` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct CatchUnwind<F> where F: Future {
+    future: Option<F>,
+}
+
+pub fn new<F>(future: F) -> CatchUnwind<F>
+    where F: Future + UnwindSafe,
+{
+    CatchUnwind {
+        future: Some(future),
+    }
+}
+
+impl<F> Future for CatchUnwind<F>
+    where F: Future + UnwindSafe,
+{
+    type Item = Result<F::Item, F::Error>;
+    type Error = Box<Any + Send>;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        let mut future = self.future.take().expect("cannot poll twice");
+        let (res, future) = catch_unwind(|| (future.poll(), future))?;
+        match res {
+            Ok(Async::NotReady) => {
+                self.future = Some(future);
+                Ok(Async::NotReady)
+            }
+            Ok(Async::Ready(t)) => Ok(Async::Ready(Ok(t))),
+            Err(e) => Ok(Async::Ready(Err(e))),
+        }
+    }
+}
+
+impl<F: Future> Future for AssertUnwindSafe<F> {
+    type Item = F::Item;
+    type Error = F::Error;
+
+    fn poll(&mut self) -> Poll<F::Item, F::Error> {
+        self.0.poll()
+    }
+}
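
A short `catch_unwind` sketch, assuming the default `use_std` feature:

extern crate futures;

use futures::{future, Future};

fn main() {
    // The panic raised while polling is caught and surfaced as the outer
    // `Err(Box<Any + Send>)`; the default panic hook still prints the message.
    let f = future::lazy(|| -> future::FutureResult<u32, u32> { panic!("boom") });
    assert!(f.catch_unwind().wait().is_err());
}
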
diff --git a/rustc_deps/vendor/futures/src/future/chain.rs b/rustc_deps/vendor/futures/src/future/chain.rs
new file mode 100644
index 0000000..1bf5cd6
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/chain.rs
@@ -0,0 +1,48 @@
+use core::mem;
+
+use {Future, Poll, Async};
+
+#[derive(Debug)]
+pub enum Chain<A, B, C> where A: Future {
+    First(A, C),
+    Second(B),
+    Done,
+}
+
+impl<A, B, C> Chain<A, B, C>
+    where A: Future,
+          B: Future,
+{
+    pub fn new(a: A, c: C) -> Chain<A, B, C> {
+        Chain::First(a, c)
+    }
+
+    pub fn poll<F>(&mut self, f: F) -> Poll<B::Item, B::Error>
+        where F: FnOnce(Result<A::Item, A::Error>, C)
+                        -> Result<Result<B::Item, B>, B::Error>,
+    {
+        let a_result = match *self {
+            Chain::First(ref mut a, _) => {
+                match a.poll() {
+                    Ok(Async::NotReady) => return Ok(Async::NotReady),
+                    Ok(Async::Ready(t)) => Ok(t),
+                    Err(e) => Err(e),
+                }
+            }
+            Chain::Second(ref mut b) => return b.poll(),
+            Chain::Done => panic!("cannot poll a chained future twice"),
+        };
+        let data = match mem::replace(self, Chain::Done) {
+            Chain::First(_, c) => c,
+            _ => panic!(),
+        };
+        match f(a_result, data)? {
+            Ok(e) => Ok(Async::Ready(e)),
+            Err(mut b) => {
+                let ret = b.poll();
+                *self = Chain::Second(b);
+                ret
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/either.rs b/rustc_deps/vendor/futures/src/future/either.rs
new file mode 100644
index 0000000..f8c47f1
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/either.rs
@@ -0,0 +1,39 @@
+use {Future, Poll};
+
+/// Combines two different futures yielding the same item and error
+/// types into a single type.
+#[derive(Debug)]
+pub enum Either<A, B> {
+    /// First branch of the type
+    A(A),
+    /// Second branch of the type
+    B(B),
+}
+
+impl<T, A, B> Either<(T, A), (T, B)> {
+    /// Splits out the homogeneous type from an either of tuples.
+    ///
+    /// This method is typically useful when combined with the `Future::select2`
+    /// combinator.
+    pub fn split(self) -> (T, Either<A, B>) {
+        match self {
+            Either::A((a, b)) => (a, Either::A(b)),
+            Either::B((a, b)) => (a, Either::B(b)),
+        }
+    }
+}
+
+impl<A, B> Future for Either<A, B>
+    where A: Future,
+          B: Future<Item = A::Item, Error = A::Error>
+{
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<A::Item, A::Error> {
+        match *self {
+            Either::A(ref mut a) => a.poll(),
+            Either::B(ref mut b) => b.poll(),
+        }
+    }
+}
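
A small sketch showing the usual role of `Either`: letting two branches return different concrete future types behind one signature (the `pick`/`slow` names here are illustrative, not part of the crate):

extern crate futures;

use futures::future::{self, Either, Future, FutureResult, Lazy};

// The two branches return different concrete future types, but the caller
// only sees a single `Either` with a common Item/Error pair.
fn pick(flag: bool)
    -> Either<FutureResult<u32, ()>, Lazy<fn() -> Result<u32, ()>, Result<u32, ()>>>
{
    if flag {
        Either::A(future::ok(1))
    } else {
        Either::B(future::lazy(slow as fn() -> Result<u32, ()>))
    }
}

fn slow() -> Result<u32, ()> {
    Ok(2)
}

fn main() {
    assert_eq!(pick(true).wait(), Ok(1));
    assert_eq!(pick(false).wait(), Ok(2));
}
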
diff --git a/rustc_deps/vendor/futures/src/future/empty.rs b/rustc_deps/vendor/futures/src/future/empty.rs
new file mode 100644
index 0000000..fbb56b2
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/empty.rs
@@ -0,0 +1,31 @@
+//! Definition of the Empty combinator, a future that's never ready.
+
+use core::marker;
+
+use {Future, Poll, Async};
+
+/// A future which is never resolved.
+///
+/// This future can be created with the `empty` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Empty<T, E> {
+    _data: marker::PhantomData<(T, E)>,
+}
+
+/// Creates a future which never resolves, representing a computation that never
+/// finishes.
+///
+/// The returned future will forever return `Async::NotReady`.
+pub fn empty<T, E>() -> Empty<T, E> {
+    Empty { _data: marker::PhantomData }
+}
+
+impl<T, E> Future for Empty<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        Ok(Async::NotReady)
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/flatten.rs b/rustc_deps/vendor/futures/src/future/flatten.rs
new file mode 100644
index 0000000..bfe2869
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/flatten.rs
@@ -0,0 +1,49 @@
+use {Future, IntoFuture, Poll};
+use core::fmt;
+use super::chain::Chain;
+
+/// Future for the `flatten` combinator, flattening a future-of-a-future to get just
+/// the result of the final future.
+///
+/// This is created by the `Future::flatten` method.
+#[must_use = "futures do nothing unless polled"]
+pub struct Flatten<A> where A: Future, A::Item: IntoFuture {
+    state: Chain<A, <A::Item as IntoFuture>::Future, ()>,
+}
+
+impl<A> fmt::Debug for Flatten<A>
+    where A: Future + fmt::Debug,
+          A::Item: IntoFuture,
+          <<A as IntoFuture>::Item as IntoFuture>::Future: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Flatten")
+            .field("state", &self.state)
+            .finish()
+    }
+}
+
+pub fn new<A>(future: A) -> Flatten<A>
+    where A: Future,
+          A::Item: IntoFuture,
+{
+    Flatten {
+        state: Chain::new(future, ()),
+    }
+}
+
+impl<A> Future for Flatten<A>
+    where A: Future,
+          A::Item: IntoFuture,
+          <<A as Future>::Item as IntoFuture>::Error: From<<A as Future>::Error>
+{
+    type Item = <<A as Future>::Item as IntoFuture>::Item;
+    type Error = <<A as Future>::Item as IntoFuture>::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        self.state.poll(|a, ()| {
+            let future = a?.into_future();
+            Ok(Err(future))
+        })
+    }
+}
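
A minimal `flatten` sketch:

extern crate futures;

use futures::{future, Future};

fn main() {
    // The outer future resolves to an inner future; `flatten` waits for both
    // and yields the inner result. The inner error type must be convertible
    // (via `From`) from the outer one -- identical here.
    let nested = future::ok::<_, u32>(future::ok::<u32, u32>(7));
    assert_eq!(nested.flatten().wait(), Ok(7));
}
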
diff --git a/rustc_deps/vendor/futures/src/future/flatten_stream.rs b/rustc_deps/vendor/futures/src/future/flatten_stream.rs
new file mode 100644
index 0000000..7bf3b9c
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/flatten_stream.rs
@@ -0,0 +1,99 @@
+use {Async, Future, Poll};
+use core::fmt;
+use stream::Stream;
+
+/// Future for the `flatten_stream` combinator, flattening a
+/// future-of-a-stream to get just the result of the final stream as a stream.
+///
+/// This is created by the `Future::flatten_stream` method.
+#[must_use = "streams do nothing unless polled"]
+pub struct FlattenStream<F>
+    where F: Future,
+          <F as Future>::Item: Stream<Error=F::Error>,
+{
+    state: State<F>
+}
+
+impl<F> fmt::Debug for FlattenStream<F>
+    where F: Future + fmt::Debug,
+          <F as Future>::Item: Stream<Error=F::Error> + fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("FlattenStream")
+            .field("state", &self.state)
+            .finish()
+    }
+}
+
+pub fn new<F>(f: F) -> FlattenStream<F>
+    where F: Future,
+          <F as Future>::Item: Stream<Error=F::Error>,
+{
+    FlattenStream {
+        state: State::Future(f)
+    }
+}
+
+#[derive(Debug)]
+enum State<F>
+    where F: Future,
+          <F as Future>::Item: Stream<Error=F::Error>,
+{
+    // future is not yet called or called and not ready
+    Future(F),
+    // future resolved to Stream
+    Stream(F::Item),
+    // EOF after future resolved to error
+    Eof,
+    // terminal state: polled again after EOF was already returned
+    Done,
+}
+
+impl<F> Stream for FlattenStream<F>
+    where F: Future,
+          <F as Future>::Item: Stream<Error=F::Error>,
+{
+    type Item = <F::Item as Stream>::Item;
+    type Error = <F::Item as Stream>::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        loop {
+            let (next_state, ret_opt) = match self.state {
+                State::Future(ref mut f) => {
+                    match f.poll() {
+                        Ok(Async::NotReady) => {
+                            // State is not changed, early return.
+                            return Ok(Async::NotReady)
+                        },
+                        Ok(Async::Ready(stream)) => {
+                            // Future resolved to stream.
+                            // We do not return, but poll that
+                            // stream in the next loop iteration.
+                            (State::Stream(stream), None)
+                        }
+                        Err(e) => {
+                            (State::Eof, Some(Err(e)))
+                        }
+                    }
+                }
+                State::Stream(ref mut s) => {
+                    // Just forward call to the stream,
+                    // do not track its state.
+                    return s.poll();
+                }
+                State::Eof => {
+                    (State::Done, Some(Ok(Async::Ready(None))))
+                }
+                State::Done => {
+                    panic!("poll called after eof");
+                }
+            };
+
+            self.state = next_state;
+            if let Some(ret) = ret_opt {
+                return ret;
+            }
+        }
+    }
+}
+
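
A corresponding `flatten_stream` sketch, assuming `stream::iter_ok` from the same crate:

extern crate futures;

use futures::{future, stream, Future, Stream};

fn main() {
    // The future resolves to a stream of three items; `flatten_stream`
    // exposes them directly, and `collect` gathers them for the assertion.
    let fut = future::ok::<_, ()>(stream::iter_ok::<_, ()>(vec![1, 2, 3]));
    assert_eq!(fut.flatten_stream().collect().wait(), Ok(vec![1, 2, 3]));
}
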
diff --git a/rustc_deps/vendor/futures/src/future/from_err.rs b/rustc_deps/vendor/futures/src/future/from_err.rs
new file mode 100644
index 0000000..97e35d7
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/from_err.rs
@@ -0,0 +1,35 @@
+use core::marker::PhantomData;
+
+use {Future, Poll, Async};
+
+/// Future for the `from_err` combinator, changing the error type of a future.
+///
+/// This is created by the `Future::from_err` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct FromErr<A, E> where A: Future {
+    future: A,
+    f: PhantomData<E>
+}
+
+pub fn new<A, E>(future: A) -> FromErr<A, E>
+    where A: Future
+{
+    FromErr {
+        future: future,
+        f: PhantomData
+    }
+}
+
+impl<A:Future, E:From<A::Error>> Future for FromErr<A, E> {
+    type Item = A::Item;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<A::Item, E> {
+        let e = match self.future.poll() {
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            other => other,
+        };
+        e.map_err(From::from)
+    }
+}
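
A brief `from_err` sketch; `std::io::Error: From<io::ErrorKind>` supplies the conversion here:

extern crate futures;

use std::io;

use futures::{future, Future};

fn main() {
    // `from_err` lifts the error type through `From`, avoiding an explicit
    // `map_err` closure.
    let f = future::err::<u32, io::ErrorKind>(io::ErrorKind::NotFound)
        .from_err::<io::Error>();
    assert_eq!(f.wait().unwrap_err().kind(), io::ErrorKind::NotFound);
}
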
diff --git a/rustc_deps/vendor/futures/src/future/fuse.rs b/rustc_deps/vendor/futures/src/future/fuse.rs
new file mode 100644
index 0000000..05ad3d5
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/fuse.rs
@@ -0,0 +1,49 @@
+use {Future, Poll, Async};
+
+/// A future which "fuses" a future once it's been resolved.
+///
+/// Normally futures can behave unpredictably once they're polled again after
+/// having been resolved, but `Fuse` is always defined to return `Async::NotReady`
+/// from `poll` after it has resolved successfully or returned an error.
+///
+/// This is created by the `Future::fuse` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Fuse<A: Future> {
+    future: Option<A>,
+}
+
+pub fn new<A: Future>(f: A) -> Fuse<A> {
+    Fuse {
+        future: Some(f),
+    }
+}
+
+impl<A: Future> Fuse<A> {
+    /// Returns whether the underlying future has finished or not.
+    /// 
+    /// If this method returns `true`, then all future calls to `poll`
+    /// are guaranteed to return `Ok(Async::NotReady)`. If this returns
+    /// false, then the underlying future has not been driven to
+    /// completion.
+    pub fn is_done(&self) -> bool {
+        self.future.is_none()
+    }
+}
+
+impl<A: Future> Future for Fuse<A> {
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<A::Item, A::Error> {
+        let res = self.future.as_mut().map(|f| f.poll());
+        match res.unwrap_or(Ok(Async::NotReady)) {
+            res @ Ok(Async::Ready(_)) |
+            res @ Err(_) => {
+                self.future = None;
+                res
+            }
+            Ok(Async::NotReady) => Ok(Async::NotReady)
+        }
+    }
+}
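
A `fuse` sketch; `FutureResult` can be polled without a task context, which keeps the example self-contained:

extern crate futures;

use futures::{future, Async, Future};

fn main() {
    // After the fused future completes once, further polls return NotReady
    // instead of exhibiting undefined behaviour.
    let mut f = future::ok::<u32, ()>(5).fuse();
    assert_eq!(f.poll(), Ok(Async::Ready(5)));
    assert!(f.is_done());
    assert_eq!(f.poll(), Ok(Async::NotReady));
}
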
diff --git a/rustc_deps/vendor/futures/src/future/inspect.rs b/rustc_deps/vendor/futures/src/future/inspect.rs
new file mode 100644
index 0000000..59fcd78
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/inspect.rs
@@ -0,0 +1,40 @@
+use {Future, Poll, Async};
+
+/// Do something with the item of a future, passing it on.
+///
+/// This is created by the `Future::inspect` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Inspect<A, F> where A: Future {
+    future: A,
+    f: Option<F>,
+}
+
+pub fn new<A, F>(future: A, f: F) -> Inspect<A, F>
+    where A: Future,
+          F: FnOnce(&A::Item),
+{
+    Inspect {
+        future: future,
+        f: Some(f),
+    }
+}
+
+impl<A, F> Future for Inspect<A, F>
+    where A: Future,
+          F: FnOnce(&A::Item),
+{
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<A::Item, A::Error> {
+        match self.future.poll() {
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            Ok(Async::Ready(e)) => {
+                (self.f.take().expect("cannot poll Inspect twice"))(&e);
+                Ok(Async::Ready(e))
+            },
+            Err(e) => Err(e),
+        }
+    }
+}
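
An `inspect` sketch:

extern crate futures;

use futures::{future, Future};

fn main() {
    // `inspect` observes the resolved value by reference and passes it
    // through unchanged.
    let f = future::ok::<u32, u32>(2).inspect(|x| println!("about to resolve: {}", x));
    assert_eq!(f.wait(), Ok(2));
}
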
diff --git a/rustc_deps/vendor/futures/src/future/into_stream.rs b/rustc_deps/vendor/futures/src/future/into_stream.rs
new file mode 100644
index 0000000..6e299e6
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/into_stream.rs
@@ -0,0 +1,36 @@
+use {Async, Poll};
+use Future;
+use stream::Stream;
+
+/// Future that forwards one element from the underlying future
+/// (whether it is a success or an error) and emits EOF after that.
+#[derive(Debug)]
+pub struct IntoStream<F: Future> {
+    future: Option<F>
+}
+
+pub fn new<F: Future>(future: F) -> IntoStream<F> {
+    IntoStream {
+        future: Some(future)
+    }
+}
+
+impl<F: Future> Stream for IntoStream<F> {
+    type Item = F::Item;
+    type Error = F::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        let ret = match self.future {
+            None => return Ok(Async::Ready(None)),
+            Some(ref mut future) => {
+                match future.poll() {
+                    Ok(Async::NotReady) => return Ok(Async::NotReady),
+                    Err(e) => Err(e),
+                    Ok(Async::Ready(r)) => Ok(r),
+                }
+            }
+        };
+        self.future = None;
+        ret.map(|r| Async::Ready(Some(r)))
+    }
+}
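
An `into_stream` sketch:

extern crate futures;

use futures::{future, Future, Stream};

fn main() {
    // The single resolved value becomes a one-element stream followed by EOF.
    let s = future::ok::<u32, ()>(3).into_stream();
    assert_eq!(s.collect().wait(), Ok(vec![3]));
}
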
diff --git a/rustc_deps/vendor/futures/src/future/join.rs b/rustc_deps/vendor/futures/src/future/join.rs
new file mode 100644
index 0000000..4521212
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/join.rs
@@ -0,0 +1,172 @@
+#![allow(non_snake_case)]
+
+use core::fmt;
+use core::mem;
+
+use {Future, Poll, IntoFuture, Async};
+
+macro_rules! generate {
+    ($(
+        $(#[$doc:meta])*
+        ($Join:ident, $new:ident, <A, $($B:ident),*>),
+    )*) => ($(
+        $(#[$doc])*
+        #[must_use = "futures do nothing unless polled"]
+        pub struct $Join<A, $($B),*>
+            where A: Future,
+                  $($B: Future<Error=A::Error>),*
+        {
+            a: MaybeDone<A>,
+            $($B: MaybeDone<$B>,)*
+        }
+
+        impl<A, $($B),*> fmt::Debug for $Join<A, $($B),*>
+            where A: Future + fmt::Debug,
+                  A::Item: fmt::Debug,
+                  $(
+                      $B: Future<Error=A::Error> + fmt::Debug,
+                      $B::Item: fmt::Debug
+                  ),*
+        {
+            fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+                fmt.debug_struct(stringify!($Join))
+                    .field("a", &self.a)
+                    $(.field(stringify!($B), &self.$B))*
+                    .finish()
+            }
+        }
+
+        pub fn $new<A, $($B),*>(a: A, $($B: $B),*) -> $Join<A, $($B),*>
+            where A: Future,
+                  $($B: Future<Error=A::Error>),*
+        {
+            $Join {
+                a: MaybeDone::NotYet(a),
+                $($B: MaybeDone::NotYet($B)),*
+            }
+        }
+
+        impl<A, $($B),*> $Join<A, $($B),*>
+            where A: Future,
+                  $($B: Future<Error=A::Error>),*
+        {
+            fn erase(&mut self) {
+                self.a = MaybeDone::Gone;
+                $(self.$B = MaybeDone::Gone;)*
+            }
+        }
+
+        impl<A, $($B),*> Future for $Join<A, $($B),*>
+            where A: Future,
+                  $($B: Future<Error=A::Error>),*
+        {
+            type Item = (A::Item, $($B::Item),*);
+            type Error = A::Error;
+
+            fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+                let mut all_done = match self.a.poll() {
+                    Ok(done) => done,
+                    Err(e) => {
+                        self.erase();
+                        return Err(e)
+                    }
+                };
+                $(
+                    all_done = match self.$B.poll() {
+                        Ok(done) => all_done && done,
+                        Err(e) => {
+                            self.erase();
+                            return Err(e)
+                        }
+                    };
+                )*
+
+                if all_done {
+                    Ok(Async::Ready((self.a.take(), $(self.$B.take()),*)))
+                } else {
+                    Ok(Async::NotReady)
+                }
+            }
+        }
+
+        impl<A, $($B),*> IntoFuture for (A, $($B),*)
+            where A: IntoFuture,
+        $(
+            $B: IntoFuture<Error=A::Error>
+        ),*
+        {
+            type Future = $Join<A::Future, $($B::Future),*>;
+            type Item = (A::Item, $($B::Item),*);
+            type Error = A::Error;
+
+            fn into_future(self) -> Self::Future {
+                match self {
+                    (a, $($B),+) => {
+                        $new(
+                            IntoFuture::into_future(a),
+                            $(IntoFuture::into_future($B)),+
+                        )
+                    }
+                }
+            }
+        }
+
+    )*)
+}
+
+generate! {
+    /// Future for the `join` combinator, waiting for two futures to
+    /// complete.
+    ///
+    /// This is created by the `Future::join` method.
+    (Join, new, <A, B>),
+
+    /// Future for the `join3` combinator, waiting for three futures to
+    /// complete.
+    ///
+    /// This is created by the `Future::join3` method.
+    (Join3, new3, <A, B, C>),
+
+    /// Future for the `join4` combinator, waiting for four futures to
+    /// complete.
+    ///
+    /// This is created by the `Future::join4` method.
+    (Join4, new4, <A, B, C, D>),
+
+    /// Future for the `join5` combinator, waiting for five futures to
+    /// complete.
+    ///
+    /// This is created by the `Future::join5` method.
+    (Join5, new5, <A, B, C, D, E>),
+}
+
+#[derive(Debug)]
+enum MaybeDone<A: Future> {
+    NotYet(A),
+    Done(A::Item),
+    Gone,
+}
+
+impl<A: Future> MaybeDone<A> {
+    fn poll(&mut self) -> Result<bool, A::Error> {
+        let res = match *self {
+            MaybeDone::NotYet(ref mut a) => a.poll()?,
+            MaybeDone::Done(_) => return Ok(true),
+            MaybeDone::Gone => panic!("cannot poll Join twice"),
+        };
+        match res {
+            Async::Ready(res) => {
+                *self = MaybeDone::Done(res);
+                Ok(true)
+            }
+            Async::NotReady => Ok(false),
+        }
+    }
+
+    fn take(&mut self) -> A::Item {
+        match mem::replace(self, MaybeDone::Gone) {
+            MaybeDone::Done(a) => a,
+            _ => panic!(),
+        }
+    }
+}
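
A `join` sketch (both futures share the error type `u32`):

extern crate futures;

use futures::{future, Future};

fn main() {
    // The combined future resolves to a tuple of both items once each of the
    // joined futures has completed.
    let f = future::ok::<u32, u32>(1).join(future::ok::<u32, u32>(2));
    assert_eq!(f.wait(), Ok((1, 2)));
}
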
diff --git a/rustc_deps/vendor/futures/src/future/join_all.rs b/rustc_deps/vendor/futures/src/future/join_all.rs
new file mode 100644
index 0000000..398a7a4
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/join_all.rs
@@ -0,0 +1,136 @@
+//! Definition of the `JoinAll` combinator, waiting for all of a list of futures
+//! to finish.
+
+use std::prelude::v1::*;
+
+use std::fmt;
+use std::mem;
+
+use {Future, IntoFuture, Poll, Async};
+
+#[derive(Debug)]
+enum ElemState<T> where T: Future {
+    Pending(T),
+    Done(T::Item),
+}
+
+/// A future which takes a list of futures and resolves with a vector of the
+/// completed values.
+///
+/// This future is created with the `join_all` method.
+#[must_use = "futures do nothing unless polled"]
+pub struct JoinAll<I>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+{
+    elems: Vec<ElemState<<I::Item as IntoFuture>::Future>>,
+}
+
+impl<I> fmt::Debug for JoinAll<I>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+          <<I as IntoIterator>::Item as IntoFuture>::Future: fmt::Debug,
+          <<I as IntoIterator>::Item as IntoFuture>::Item: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("JoinAll")
+            .field("elems", &self.elems)
+            .finish()
+    }
+}
+
+/// Creates a future which represents a collection of the results of the futures
+/// given.
+///
+/// The returned future will drive execution for all of its underlying futures,
+/// collecting the results into a destination `Vec<T>` in the same order as they
+/// were provided. If any future returns an error then all other futures will be
+/// canceled and an error will be returned immediately. If all futures complete
+/// successfully, however, then the returned future will succeed with a `Vec` of
+/// all the successful results.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let f = join_all(vec![
+///     ok::<u32, u32>(1),
+///     ok::<u32, u32>(2),
+///     ok::<u32, u32>(3),
+/// ]);
+/// let f = f.map(|x| {
+///     assert_eq!(x, [1, 2, 3]);
+/// });
+///
+/// let f = join_all(vec![
+///     Box::new(ok::<u32, u32>(1)),
+///     Box::new(err::<u32, u32>(2)),
+///     Box::new(ok::<u32, u32>(3)),
+/// ]);
+/// let f = f.then(|x| {
+///     assert_eq!(x, Err(2));
+///     x
+/// });
+/// ```
+pub fn join_all<I>(i: I) -> JoinAll<I>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+{
+    let elems = i.into_iter().map(|f| {
+        ElemState::Pending(f.into_future())
+    }).collect();
+    JoinAll { elems: elems }
+}
+
+impl<I> Future for JoinAll<I>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+{
+    type Item = Vec<<I::Item as IntoFuture>::Item>;
+    type Error = <I::Item as IntoFuture>::Error;
+
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        let mut all_done = true;
+
+        for idx in 0 .. self.elems.len() {
+            let done_val = match self.elems[idx] {
+                ElemState::Pending(ref mut t) => {
+                    match t.poll() {
+                        Ok(Async::Ready(v)) => Ok(v),
+                        Ok(Async::NotReady) => {
+                            all_done = false;
+                            continue
+                        }
+                        Err(e) => Err(e),
+                    }
+                }
+                ElemState::Done(ref mut _v) => continue,
+            };
+
+            match done_val {
+                Ok(v) => self.elems[idx] = ElemState::Done(v),
+                Err(e) => {
+                    // On completion drop all our associated resources
+                    // ASAP.
+                    self.elems = Vec::new();
+                    return Err(e)
+                }
+            }
+        }
+
+        if all_done {
+            let elems = mem::replace(&mut self.elems, Vec::new());
+            let result = elems.into_iter().map(|e| {
+                match e {
+                    ElemState::Done(t) => t,
+                    _ => unreachable!(),
+                }
+            }).collect();
+            Ok(Async::Ready(result))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/lazy.rs b/rustc_deps/vendor/futures/src/future/lazy.rs
new file mode 100644
index 0000000..2f31033
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/lazy.rs
@@ -0,0 +1,84 @@
+//! Definition of the Lazy combinator, deferring execution of a function until
+//! the future is polled.
+
+use core::mem;
+
+use {Future, IntoFuture, Poll};
+
+/// A future which defers creation of the actual future until a callback is
+/// scheduled.
+///
+/// This is created by the `lazy` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Lazy<F, R: IntoFuture> {
+    inner: _Lazy<F, R::Future>,
+}
+
+#[derive(Debug)]
+enum _Lazy<F, R> {
+    First(F),
+    Second(R),
+    Moved,
+}
+
+/// Creates a new future which will eventually be the same as the one created
+/// by the closure provided.
+///
+/// The provided closure is only run once the future is polled (i.e. has a
+/// callback scheduled on it); otherwise the closure never runs. Once run,
+/// however, this future is the same as the one the closure creates.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let a = lazy(|| ok::<u32, u32>(1));
+///
+/// let b = lazy(|| -> FutureResult<u32, u32> {
+///     panic!("oh no!")
+/// });
+/// drop(b); // closure is never run
+/// ```
+pub fn lazy<F, R>(f: F) -> Lazy<F, R>
+    where F: FnOnce() -> R,
+          R: IntoFuture
+{
+    Lazy {
+        inner: _Lazy::First(f),
+    }
+}
+
+impl<F, R> Lazy<F, R>
+    where F: FnOnce() -> R,
+          R: IntoFuture,
+{
+    fn get(&mut self) -> &mut R::Future {
+        match self.inner {
+            _Lazy::First(_) => {}
+            _Lazy::Second(ref mut f) => return f,
+            _Lazy::Moved => panic!(), // can only happen if `f()` panics
+        }
+        match mem::replace(&mut self.inner, _Lazy::Moved) {
+            _Lazy::First(f) => self.inner = _Lazy::Second(f().into_future()),
+            _ => panic!(), // we already found First
+        }
+        match self.inner {
+            _Lazy::Second(ref mut f) => f,
+            _ => panic!(), // we just stored Second
+        }
+    }
+}
+
+impl<F, R> Future for Lazy<F, R>
+    where F: FnOnce() -> R,
+          R: IntoFuture,
+{
+    type Item = R::Item;
+    type Error = R::Error;
+
+    fn poll(&mut self) -> Poll<R::Item, R::Error> {
+        self.get().poll()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/loop_fn.rs b/rustc_deps/vendor/futures/src/future/loop_fn.rs
new file mode 100644
index 0000000..299a038
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/loop_fn.rs
@@ -0,0 +1,99 @@
+//! Definition of the `LoopFn` combinator, implementing `Future` loops.
+
+use {Async, Future, IntoFuture, Poll};
+
+/// The status of a `loop_fn` loop.
+#[derive(Debug)]
+pub enum Loop<T, S> {
+    /// Indicates that the loop has completed with output `T`.
+    Break(T),
+
+    /// Indicates that the loop function should be called again with input
+    /// state `S`.
+    Continue(S),
+}
+
+/// A future implementing a tail-recursive loop.
+///
+/// Created by the `loop_fn` function.
+#[derive(Debug)]
+pub struct LoopFn<A, F> where A: IntoFuture {
+    future: A::Future,
+    func: F,
+}
+
+/// Creates a new future implementing a tail-recursive loop.
+///
+/// The loop function is immediately called with `initial_state` and should
+/// return a value that can be converted to a future. On successful completion,
+/// this future should output a `Loop<T, S>` to indicate the status of the
+/// loop.
+///
+/// `Loop::Break(T)` halts the loop and completes the future with output `T`.
+///
+/// `Loop::Continue(S)` reinvokes the loop function with state `S`. The returned
+/// future will be subsequently polled for a new `Loop<T, S>` value.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::{ok, loop_fn, Future, FutureResult, Loop};
+/// use std::io::Error;
+///
+/// struct Client {
+///     ping_count: u8,
+/// }
+///
+/// impl Client {
+///     fn new() -> Self {
+///         Client { ping_count: 0 }
+///     }
+///
+///     fn send_ping(self) -> FutureResult<Self, Error> {
+///         ok(Client { ping_count: self.ping_count + 1 })
+///     }
+///
+///     fn receive_pong(self) -> FutureResult<(Self, bool), Error> {
+///         let done = self.ping_count >= 5;
+///         ok((self, done))
+///     }
+/// }
+///
+/// let ping_til_done = loop_fn(Client::new(), |client| {
+///     client.send_ping()
+///         .and_then(|client| client.receive_pong())
+///         .and_then(|(client, done)| {
+///             if done {
+///                 Ok(Loop::Break(client))
+///             } else {
+///                 Ok(Loop::Continue(client))
+///             }
+///         })
+/// });
+/// ```
+pub fn loop_fn<S, T, A, F>(initial_state: S, mut func: F) -> LoopFn<A, F>
+    where F: FnMut(S) -> A,
+          A: IntoFuture<Item = Loop<T, S>>,
+{
+    LoopFn {
+        future: func(initial_state).into_future(),
+        func: func,
+    }
+}
+
+impl<S, T, A, F> Future for LoopFn<A, F>
+    where F: FnMut(S) -> A,
+          A: IntoFuture<Item = Loop<T, S>>,
+{
+    type Item = T;
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        loop {
+            match try_ready!(self.future.poll()) {
+                Loop::Break(x) => return Ok(Async::Ready(x)),
+                Loop::Continue(s) => self.future = (self.func)(s).into_future(),
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/map.rs b/rustc_deps/vendor/futures/src/future/map.rs
new file mode 100644
index 0000000..4b1f4cd
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/map.rs
@@ -0,0 +1,38 @@
+use {Future, Poll, Async};
+
+/// Future for the `map` combinator, changing the type of a future.
+///
+/// This is created by the `Future::map` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Map<A, F> where A: Future {
+    future: A,
+    f: Option<F>,
+}
+
+pub fn new<A, F>(future: A, f: F) -> Map<A, F>
+    where A: Future,
+{
+    Map {
+        future: future,
+        f: Some(f),
+    }
+}
+
+impl<U, A, F> Future for Map<A, F>
+    where A: Future,
+          F: FnOnce(A::Item) -> U,
+{
+    type Item = U;
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<U, A::Error> {
+        let e = match self.future.poll() {
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            Ok(Async::Ready(e)) => Ok(e),
+            Err(e) => Err(e),
+        };
+        e.map(self.f.take().expect("cannot poll Map twice"))
+         .map(Async::Ready)
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/map_err.rs b/rustc_deps/vendor/futures/src/future/map_err.rs
new file mode 100644
index 0000000..4ea12f4
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/map_err.rs
@@ -0,0 +1,36 @@
+use {Future, Poll, Async};
+
+/// Future for the `map_err` combinator, changing the error type of a future.
+///
+/// This is created by the `Future::map_err` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct MapErr<A, F> where A: Future {
+    future: A,
+    f: Option<F>,
+}
+
+pub fn new<A, F>(future: A, f: F) -> MapErr<A, F>
+    where A: Future
+{
+    MapErr {
+        future: future,
+        f: Some(f),
+    }
+}
+
+impl<U, A, F> Future for MapErr<A, F>
+    where A: Future,
+          F: FnOnce(A::Error) -> U,
+{
+    type Item = A::Item;
+    type Error = U;
+
+    fn poll(&mut self) -> Poll<A::Item, U> {
+        let e = match self.future.poll() {
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            other => other,
+        };
+        e.map_err(self.f.take().expect("cannot poll MapErr twice"))
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/mod.rs b/rustc_deps/vendor/futures/src/future/mod.rs
new file mode 100644
index 0000000..063322e
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/mod.rs
@@ -0,0 +1,1170 @@
+//! Futures
+//!
+//! This module contains the `Future` trait and a number of adaptors for this
+//! trait. See the crate docs, and the docs for `Future`, for full detail.
+
+use core::fmt;
+use core::result;
+
+// Primitive futures
+mod empty;
+mod lazy;
+mod poll_fn;
+#[path = "result.rs"]
+mod result_;
+mod loop_fn;
+mod option;
+pub use self::empty::{empty, Empty};
+pub use self::lazy::{lazy, Lazy};
+pub use self::poll_fn::{poll_fn, PollFn};
+pub use self::result_::{result, ok, err, FutureResult};
+pub use self::loop_fn::{loop_fn, Loop, LoopFn};
+
+#[doc(hidden)]
+#[deprecated(since = "0.1.4", note = "use `ok` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{ok as finished, Ok as Finished};
+#[doc(hidden)]
+#[deprecated(since = "0.1.4", note = "use `err` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{err as failed, Err as Failed};
+#[doc(hidden)]
+#[deprecated(since = "0.1.4", note = "use `result` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{result as done, FutureResult as Done};
+#[doc(hidden)]
+#[deprecated(since = "0.1.7", note = "use `FutureResult` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{FutureResult as Ok};
+#[doc(hidden)]
+#[deprecated(since = "0.1.7", note = "use `FutureResult` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{FutureResult as Err};
+
+// combinators
+mod and_then;
+mod flatten;
+mod flatten_stream;
+mod fuse;
+mod into_stream;
+mod join;
+mod map;
+mod map_err;
+mod from_err;
+mod or_else;
+mod select;
+mod select2;
+mod then;
+mod either;
+mod inspect;
+
+// impl details
+mod chain;
+
+pub use self::and_then::AndThen;
+pub use self::flatten::Flatten;
+pub use self::flatten_stream::FlattenStream;
+pub use self::fuse::Fuse;
+pub use self::into_stream::IntoStream;
+pub use self::join::{Join, Join3, Join4, Join5};
+pub use self::map::Map;
+pub use self::map_err::MapErr;
+pub use self::from_err::FromErr;
+pub use self::or_else::OrElse;
+pub use self::select::{Select, SelectNext};
+pub use self::select2::Select2;
+pub use self::then::Then;
+pub use self::either::Either;
+pub use self::inspect::Inspect;
+
+if_std! {
+    mod catch_unwind;
+    mod join_all;
+    mod select_all;
+    mod select_ok;
+    mod shared;
+    pub use self::catch_unwind::CatchUnwind;
+    pub use self::join_all::{join_all, JoinAll};
+    pub use self::select_all::{SelectAll, SelectAllNext, select_all};
+    pub use self::select_ok::{SelectOk, select_ok};
+    pub use self::shared::{Shared, SharedItem, SharedError};
+
+    #[doc(hidden)]
+    #[deprecated(since = "0.1.4", note = "use join_all instead")]
+    #[cfg(feature = "with-deprecated")]
+    pub use self::join_all::join_all as collect;
+    #[doc(hidden)]
+    #[deprecated(since = "0.1.4", note = "use JoinAll instead")]
+    #[cfg(feature = "with-deprecated")]
+    pub use self::join_all::JoinAll as Collect;
+
+    /// A type alias for `Box<Future + Send>`
+    #[doc(hidden)]
+    #[deprecated(note = "removed without replacement, recommended to use a \
+                         local extension trait or function if needed, more \
+                         details in https://github.com/rust-lang-nursery/futures-rs/issues/228")]
+    pub type BoxFuture<T, E> = ::std::boxed::Box<Future<Item = T, Error = E> + Send>;
+
+    impl<F: ?Sized + Future> Future for ::std::boxed::Box<F> {
+        type Item = F::Item;
+        type Error = F::Error;
+
+        fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+            (**self).poll()
+        }
+    }
+}
+
+use {Poll, stream};
+
+/// Trait for types which are a placeholder of a value that may become
+/// available at some later point in time.
+///
+/// In addition to the documentation here you can also find more information
+/// about futures [online] at [https://tokio.rs](https://tokio.rs)
+///
+/// [online]: https://tokio.rs/docs/getting-started/futures/
+///
+/// Futures are used to provide a sentinel through which a value can be
+/// referenced. They crucially allow chaining and composing operations through
+/// consumption which allows expressing entire trees of computation as one
+/// sentinel value.
+///
+/// The ergonomics and implementation of the `Future` trait are very similar to
+/// the `Iterator` trait in that there is just one method you need
+/// to implement, but you get a whole lot of others for free as a result.
+///
+/// # The `poll` method
+///
+/// The core method of future, `poll`, is used to attempt to generate the value
+/// of a `Future`. This method *does not block* but is allowed to inform the
+/// caller that the value is not ready yet. Implementations of `poll` may
+/// themselves do work to generate the value, but it's guaranteed that this will
+/// never block the calling thread.
+///
+/// A key aspect of this method is that if the value is not yet available the
+/// current task is scheduled to receive a notification when it's later ready to
+/// be made available. This follows what's typically known as a "readiness" or
+/// "pull" model where values are pulled out of futures on demand, and
+/// otherwise a task is notified when a value might be ready to get pulled out.
+///
+/// The `poll` method is not intended to be called in general, but rather is
+/// typically called in the context of a "task" which drives a future to
+/// completion. For more information on this see the `task` module.
+///
+/// More information about the details of `poll` and the nitty-gritty of tasks
+/// can be [found online at tokio.rs][poll-dox].
+///
+/// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/
+///
+/// # Combinators
+///
+/// Like iterators, futures provide a large number of combinators to work with
+/// futures to express computations in a much more natural method than
+/// scheduling a number of callbacks. For example the `map` method can change
+/// a `Future<Item=T>` to a `Future<Item=U>` or an `and_then` combinator could
+/// create a future after the first one is done and only be resolved when the
+/// second is done.
+///
+/// Combinators act very similarly to the methods on the `Iterator` trait itself
+/// or those on `Option` and `Result`. Like with iterators, the combinators are
+/// zero-cost and don't impose any extra layers of indirection you wouldn't
+/// otherwise have to write down.
+///
+/// More information about combinators can be found [on tokio.rs].
+///
+/// [on tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-mechanics/
+pub trait Future {
+    /// The type of value that this future will resolve with if it is
+    /// successful.
+    type Item;
+
+    /// The type of error that this future will resolve with if it fails in a
+    /// normal fashion.
+    type Error;
+
+    /// Query this future to see if its value has become available, registering
+    /// interest if it is not.
+    ///
+    /// This function will check the internal state of the future and assess
+    /// whether the value is ready to be produced. Implementers of this function
+    /// should ensure that a call to this **never blocks** as event loops may
+    /// not work properly otherwise.
+    ///
+    /// When a future is not ready yet, the `Async::NotReady` value will be
+    /// returned. In this situation the future will *also* register interest of
+    /// the current task in the value being produced. This is done by calling
+    /// `task::park` to retrieve a handle to the current `Task`. When the future
+    /// is then ready to make progress (e.g. it should be `poll`ed again) the
+    /// `unpark` method is called on the `Task`.
+    ///
+    /// More information about the details of `poll` and the nitty-gritty of
+    /// tasks can be [found online at tokio.rs][poll-dox].
+    ///
+    /// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/
+    ///
+    /// # Runtime characteristics
+    ///
+    /// This function, `poll`, is the primary method for 'making progress'
+    /// within a tree of futures. For example this method will be called
+    /// repeatedly as the internal state machine makes its various transitions.
+    /// Executors are responsible for ensuring that this function is called in
+    /// the right location (e.g. always on an I/O thread or not). Unless it is
+    /// otherwise arranged to be so, it should be ensured that **implementations
+    /// of this function finish very quickly**.
+    ///
+    /// Returning quickly prevents unnecessarily clogging up threads and/or
+    /// event loops while a `poll` function call, for example, takes up compute
+    /// resources to perform some expensive computation. If it is known ahead
+    /// of time that a call to `poll` may end up taking a while, the work should
+    /// be offloaded to a thread pool (or something similar) to ensure that
+    /// `poll` can return quickly.
+    ///
+    /// Note that the `poll` function is not called repeatedly in a loop for
+    /// futures typically, but only whenever the future itself is ready. If
+    /// you're familiar with the `poll(2)` or `select(2)` syscalls on Unix
+    /// it's worth noting that futures typically do *not* suffer the same
+    /// problems of "all wakeups must poll all events". Futures have enough
+    /// support for only polling futures which cause a wakeup.
+    ///
+    /// # Return value
+    ///
+    /// This function returns `Async::NotReady` if the future is not ready yet,
+    /// `Err` if the future is finished but resolved to an error, or
+    /// `Async::Ready` with the result of this future if it's finished
+    /// successfully. Once a future has finished it is considered a contract
+    /// error to continue polling the future.
+    ///
+    /// If `NotReady` is returned, then the future will internally register
+    /// interest in the value being produced for the current task (through
+    /// `task::park`). In other words, the current task will receive a
+    /// notification (through the `unpark` method) once the value is ready to be
+    /// produced or the future can make progress.
+    ///
+    /// Note that if `NotReady` is returned it only means that *this* task will
+    /// receive a notification. Historical calls to `poll` with different tasks
+    /// will not receive notifications. In other words, implementers of the
+    /// `Future` trait need not store a queue of tasks to notify, but only the
+    /// last task that called this method. Alternatively callers of this method
+    /// can only rely on the most recent task that called `poll` being notified
+    /// when a future is ready.
+    ///
+    /// # Panics
+    ///
+    /// Once a future has completed (returned `Ready` or `Err` from `poll`),
+    /// then any future calls to `poll` may panic, block forever, or otherwise
+    /// cause wrong behavior. The `Future` trait itself provides no guarantees
+    /// about the behavior of `poll` after a future has completed.
+    ///
+    /// Callers who may call `poll` too many times may want to consider using
+    /// the `fuse` adaptor which defines the behavior of `poll`, but comes with
+    /// a little bit of extra cost.
+    ///
+    /// Additionally, calls to `poll` must always be made from within the
+    /// context of a task. If a current task is not set then this method will
+    /// likely panic.
+    ///
+    /// # Errors
+    ///
+    /// This future may have failed to finish the computation, in which case
+    /// the `Err` variant will be returned with an appropriate payload of an
+    /// error.
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error>;
+
+    /// Block the current thread until this future is resolved.
+    ///
+    /// This method will consume ownership of this future, driving it to
+    /// completion via `poll` and blocking the current thread while it's waiting
+    /// for the value to become available. Once the future is resolved the
+    /// result of this future is returned.
+    ///
+    /// > **Note:** This method is not appropriate to call on event loops or
+    /// >           similar I/O situations because it will prevent the event
+    /// >           loop from making progress (this blocks the thread). This
+    /// >           method should only be called when it's guaranteed that the
+    /// >           blocking work associated with this future will be completed
+    /// >           by another thread.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Panics
+    ///
+    /// This function does not attempt to catch panics. If the `poll` function
+    /// of this future panics, panics will be propagated to the caller.
+    #[cfg(feature = "use_std")]
+    fn wait(self) -> result::Result<Self::Item, Self::Error>
+        where Self: Sized
+    {
+        ::executor::spawn(self).wait_future()
+    }
+
+    /// Convenience function for turning this future into a trait object which
+    /// is also `Send`.
+    ///
+    /// This simply avoids the need to write `Box::new` and can often help with
+    /// type inference as well by always returning a trait object. Note that
+    /// this method requires the `Send` bound and returns a `BoxFuture`, which
+    /// also encodes this. If you'd like to create a `Box<Future>` without the
+    /// `Send` bound, then the `Box::new` function can be used instead.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future::{BoxFuture, result};
+    ///
+    /// let a: BoxFuture<i32, i32> = result(Ok(1)).boxed();
+    /// ```
+    #[cfg(feature = "use_std")]
+    #[doc(hidden)]
+    #[deprecated(note = "removed without replacement, recommended to use a \
+                         local extension trait or function if needed, more \
+                         details in https://github.com/rust-lang-nursery/futures-rs/issues/228")]
+    #[allow(deprecated)]
+    fn boxed(self) -> BoxFuture<Self::Item, Self::Error>
+        where Self: Sized + Send + 'static
+    {
+        ::std::boxed::Box::new(self)
+    }
+
+    /// Map this future's result to a different type, returning a new future of
+    /// the resulting type.
+    ///
+    /// This function is similar to the `Option::map` or `Iterator::map` where
+    /// it will change the type of the underlying future. This is useful to
+    /// chain along a computation once a future has been resolved.
+    ///
+    /// The closure provided will only be called if this future is resolved
+    /// successfully. If this future returns an error, panics, or is dropped,
+    /// then the closure provided will never be invoked.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it, similar to the existing `map` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::ok::<u32, u32>(1);
+    /// let new_future = future.map(|x| x + 3);
+    /// assert_eq!(new_future.wait(), Ok(4));
+    /// ```
+    ///
+    /// Calling `map` on an errored `Future` has no effect:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::err::<u32, u32>(1);
+    /// let new_future = future.map(|x| x + 3);
+    /// assert_eq!(new_future.wait(), Err(1));
+    /// ```
+    fn map<F, U>(self, f: F) -> Map<Self, F>
+        where F: FnOnce(Self::Item) -> U,
+              Self: Sized,
+    {
+        assert_future::<U, Self::Error, _>(map::new(self, f))
+    }
+
+    /// Map this future's error to a different error, returning a new future.
+    ///
+    /// This function is similar to the `Result::map_err` where it will change
+    /// the error type of the underlying future. This is useful for example to
+    /// ensure that futures have the same error type when used with combinators
+    /// like `select` and `join`.
+    ///
+    /// The closure provided will only be called if this future is resolved
+    /// with an error. If this future returns a success, panics, or is
+    /// dropped, then the closure provided will never be invoked.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::future::*;
+    ///
+    /// let future = err::<u32, u32>(1);
+    /// let new_future = future.map_err(|x| x + 3);
+    /// assert_eq!(new_future.wait(), Err(4));
+    /// ```
+    ///
+    /// Calling `map_err` on a successful `Future` has no effect:
+    ///
+    /// ```
+    /// use futures::future::*;
+    ///
+    /// let future = ok::<u32, u32>(1);
+    /// let new_future = future.map_err(|x| x + 3);
+    /// assert_eq!(new_future.wait(), Ok(1));
+    /// ```
+    fn map_err<F, E>(self, f: F) -> MapErr<Self, F>
+        where F: FnOnce(Self::Error) -> E,
+              Self: Sized,
+    {
+        assert_future::<Self::Item, E, _>(map_err::new(self, f))
+    }
+
+
+
+    /// Map this future's error to any error implementing `From` for
+    /// this future's `Error`, returning a new future.
+    ///
+    /// This function does for futures what `try!` does for `Result`,
+    /// by letting the compiler infer the type of the resulting error.
+    /// Just as `map_err` above, this is useful for example to ensure
+    /// that futures have the same error type when used with
+    /// combinators like `select` and `join`.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future_with_err_u8 = future::err::<(), u8>(1);
+    /// let future_with_err_u32 = future_with_err_u8.from_err::<u32>();
+    /// ```
+    fn from_err<E:From<Self::Error>>(self) -> FromErr<Self, E>
+        where Self: Sized,
+    {
+        assert_future::<Self::Item, E, _>(from_err::new(self))
+    }
+
+    /// Chain on a computation for when a future finished, passing the result of
+    /// the future to the provided closure `f`.
+    ///
+    /// This function can be used to ensure a computation runs regardless of
+    /// the conclusion of the future. The closure provided will be yielded a
+    /// `Result` once the future is complete.
+    ///
+    /// The returned value of the closure must implement the `IntoFuture` trait
+    /// and can represent some more work to be done before the composed future
+    /// is finished. Note that the `Result` type implements the `IntoFuture`
+    /// trait so it is possible to simply alter the `Result` yielded to the
+    /// closure and return it.
+    ///
+    /// If this future is dropped or panics then the closure `f` will not be
+    /// run.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future_of_1 = future::ok::<u32, u32>(1);
+    /// let future_of_4 = future_of_1.then(|x| {
+    ///     x.map(|y| y + 3)
+    /// });
+    ///
+    /// let future_of_err_1 = future::err::<u32, u32>(1);
+    /// let future_of_4 = future_of_err_1.then(|x| {
+    ///     match x {
+    ///         Ok(_) => panic!("expected an error"),
+    ///         Err(y) => future::ok::<u32, u32>(y + 3),
+    ///     }
+    /// });
+    /// ```
+    fn then<F, B>(self, f: F) -> Then<Self, B, F>
+        where F: FnOnce(result::Result<Self::Item, Self::Error>) -> B,
+              B: IntoFuture,
+              Self: Sized,
+    {
+        assert_future::<B::Item, B::Error, _>(then::new(self, f))
+    }
+
+    /// Execute another future after this one has resolved successfully.
+    ///
+    /// This function can be used to chain two futures together and ensure that
+    /// the final future isn't resolved until both have finished. The closure
+    /// provided is yielded the successful result of this future and returns
+    /// another value which can be converted into a future.
+    ///
+    /// Note that because `Result` implements the `IntoFuture` trait this method
+    /// can also be useful for chaining fallible and serial computations onto
+    /// the end of one future.
+    ///
+    /// If this future is dropped, panics, or completes with an error then the
+    /// provided closure `f` is never called.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future::{self, FutureResult};
+    ///
+    /// let future_of_1 = future::ok::<u32, u32>(1);
+    /// let future_of_4 = future_of_1.and_then(|x| {
+    ///     Ok(x + 3)
+    /// });
+    ///
+    /// let future_of_err_1 = future::err::<u32, u32>(1);
+    /// future_of_err_1.and_then(|_| -> FutureResult<u32, u32> {
+    ///     panic!("should not be called in case of an error");
+    /// });
+    /// ```
+    fn and_then<F, B>(self, f: F) -> AndThen<Self, B, F>
+        where F: FnOnce(Self::Item) -> B,
+              B: IntoFuture<Error = Self::Error>,
+              Self: Sized,
+    {
+        assert_future::<B::Item, Self::Error, _>(and_then::new(self, f))
+    }
+
+    /// Execute another future if this one resolves with an error.
+    ///
+    /// Return a future that passes along this future's value if it succeeds,
+    /// and otherwise passes the error to the closure `f` and waits for the
+    /// future it returns. The closure may also simply return a value that can
+    /// be converted into a future.
+    ///
+    /// Note that because `Result` implements the `IntoFuture` trait this method
+    /// can also be useful for chaining together fallback computations, where
+    /// when one fails, the next is attempted.
+    ///
+    /// If this future is dropped, panics, or completes successfully then the
+    /// provided closure `f` is never called.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future::{self, FutureResult};
+    ///
+    /// let future_of_err_1 = future::err::<u32, u32>(1);
+    /// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> {
+    ///     Ok(x + 3)
+    /// });
+    ///
+    /// let future_of_1 = future::ok::<u32, u32>(1);
+    /// future_of_1.or_else(|_| -> FutureResult<u32, u32> {
+    ///     panic!("should not be called in case of success");
+    /// });
+    /// ```
+    fn or_else<F, B>(self, f: F) -> OrElse<Self, B, F>
+        where F: FnOnce(Self::Error) -> B,
+              B: IntoFuture<Item = Self::Item>,
+              Self: Sized,
+    {
+        assert_future::<Self::Item, B::Error, _>(or_else::new(self, f))
+    }
+
+    /// Waits for either one of two futures to complete.
+    ///
+    /// This function will return a new future which awaits either this or
+    /// the `other` future to complete. The returned future will finish with
+    /// both the value resolved and a future representing the completion of the
+    /// other work. Both futures must have the same item and error type.
+    ///
+    /// Note that this function consumes the receiving futures and returns a
+    /// wrapped version of them.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use futures::prelude::*;
+    /// use futures::future;
+    /// use std::thread;
+    /// use std::time;
+    ///
+    /// let future1 = future::lazy(|| {
+    ///     thread::sleep(time::Duration::from_secs(5));
+    ///     future::ok::<char, ()>('a')
+    /// });
+    ///
+    /// let future2 = future::lazy(|| {
+    ///     thread::sleep(time::Duration::from_secs(3));
+    ///     future::ok::<char, ()>('b')
+    /// });
+    ///
+    /// let (value, last_future) = future1.select(future2).wait().ok().unwrap();
+    /// assert_eq!(value, 'a');
+    /// assert_eq!(last_future.wait().unwrap(), 'b');
+    /// ```
+    ///
+    /// A poor-man's `join` implemented on top of `select`:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// fn join<A>(a: A, b: A) -> Box<Future<Item=(u32, u32), Error=u32>>
+    ///     where A: Future<Item = u32, Error = u32> + 'static,
+    /// {
+    ///     Box::new(a.select(b).then(|res| -> Box<Future<Item=_, Error=_>> {
+    ///         match res {
+    ///             Ok((a, b)) => Box::new(b.map(move |b| (a, b))),
+    ///             Err((a, _)) => Box::new(future::err(a)),
+    ///         }
+    ///     }))
+    /// }
+    /// ```
+    fn select<B>(self, other: B) -> Select<Self, B::Future>
+        where B: IntoFuture<Item=Self::Item, Error=Self::Error>,
+              Self: Sized,
+    {
+        let f = select::new(self, other.into_future());
+        assert_future::<(Self::Item, SelectNext<Self, B::Future>),
+                        (Self::Error, SelectNext<Self, B::Future>), _>(f)
+    }
+
+    /// Waits for either one of two differently-typed futures to complete.
+    ///
+    /// This function will return a new future which awaits either this or
+    /// the `other` future to complete. The returned future will finish with
+    /// both the value resolved and a future representing the completion of the
+    /// other work.
+    ///
+    /// Note that this function consumes the receiving futures and returns a
+    /// wrapped version of them.
+    ///
+    /// Also note that if both this and the second future have the same
+    /// success/error type you can use the `Either::split` method to
+    /// conveniently extract out the value at the end.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future::{self, Either};
+    ///
+    /// // A poor-man's join implemented on top of select2
+    ///
+    /// fn join<A, B, E>(a: A, b: B) -> Box<Future<Item=(A::Item, B::Item), Error=E>>
+    ///     where A: Future<Error = E> + 'static,
+    ///           B: Future<Error = E> + 'static,
+    ///           E: 'static,
+    /// {
+    ///     Box::new(a.select2(b).then(|res| -> Box<Future<Item=_, Error=_>> {
+    ///         match res {
+    ///             Ok(Either::A((x, b))) => Box::new(b.map(move |y| (x, y))),
+    ///             Ok(Either::B((y, a))) => Box::new(a.map(move |x| (x, y))),
+    ///             Err(Either::A((e, _))) => Box::new(future::err(e)),
+    ///             Err(Either::B((e, _))) => Box::new(future::err(e)),
+    ///         }
+    ///     }))
+    /// }
+    /// ```
+    fn select2<B>(self, other: B) -> Select2<Self, B::Future>
+        where B: IntoFuture, Self: Sized
+    {
+        select2::new(self, other.into_future())
+    }
+
+    /// Joins the result of two futures, waiting for them both to complete.
+    ///
+    /// This function will return a new future which awaits both this and the
+    /// `other` future to complete. The returned future will finish with a tuple
+    /// of both results.
+    ///
+    /// Both futures must have the same error type, and if either finishes with
+    /// an error then the other will be dropped and that error will be
+    /// returned.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let a = future::ok::<u32, u32>(1);
+    /// let b = future::ok::<u32, u32>(2);
+    /// let pair = a.join(b);
+    ///
+    /// assert_eq!(pair.wait(), Ok((1, 2)));
+    /// ```
+    ///
+    /// If one or both of the joined `Future`s is errored, the resulting
+    /// `Future` will be errored:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let a = future::ok::<u32, u32>(1);
+    /// let b = future::err::<u32, u32>(2);
+    /// let pair = a.join(b);
+    ///
+    /// assert_eq!(pair.wait(), Err(2));
+    /// ```
+    fn join<B>(self, other: B) -> Join<Self, B::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        let f = join::new(self, other.into_future());
+        assert_future::<(Self::Item, B::Item), Self::Error, _>(f)
+    }
+
+    /// Same as `join`, but with more futures.
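+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch with immediately-ready futures:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let a = future::ok::<u32, u32>(1);
+    /// let b = future::ok::<u32, u32>(2);
+    /// let c = future::ok::<u32, u32>(3);
+    ///
+    /// assert_eq!(a.join3(b, c).wait(), Ok((1, 2, 3)));
+    /// ```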
+    fn join3<B, C>(self, b: B, c: C) -> Join3<Self, B::Future, C::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              C: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        join::new3(self, b.into_future(), c.into_future())
+    }
+
+    /// Same as `join`, but with more futures.
+    fn join4<B, C, D>(self, b: B, c: C, d: D)
+                      -> Join4<Self, B::Future, C::Future, D::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              C: IntoFuture<Error=Self::Error>,
+              D: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        join::new4(self, b.into_future(), c.into_future(), d.into_future())
+    }
+
+    /// Same as `join`, but with more futures.
+    fn join5<B, C, D, E>(self, b: B, c: C, d: D, e: E)
+                         -> Join5<Self, B::Future, C::Future, D::Future, E::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              C: IntoFuture<Error=Self::Error>,
+              D: IntoFuture<Error=Self::Error>,
+              E: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        join::new5(self, b.into_future(), c.into_future(), d.into_future(),
+                   e.into_future())
+    }
+
+    /// Convert this future into a single element stream.
+    ///
+    /// The returned stream contains a single success if this future resolves to
+    /// success, or a single error if this future resolves to an error.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::ok::<_, bool>(17);
+    /// let mut stream = future.into_stream();
+    /// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+    /// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+    ///
+    /// let future = future::err::<bool, _>(19);
+    /// let mut stream = future.into_stream();
+    /// assert_eq!(Err(19), stream.poll());
+    /// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+    /// ```
+    fn into_stream(self) -> IntoStream<Self>
+        where Self: Sized
+    {
+        into_stream::new(self)
+    }
+
+    /// Flatten the execution of this future when the successful result of this
+    /// future is itself another future.
+    ///
+    /// This can be useful when combining futures together to flatten the
+    /// computation into its final result. This method can only be called
+    /// when the successful result of this future itself implements the
+    /// `IntoFuture` trait and the error can be created from this future's error
+    /// type.
+    ///
+    /// This method is roughly equivalent to `self.and_then(|x| x)`.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let nested_future = future::ok::<_, u32>(future::ok::<u32, u32>(1));
+    /// let future = nested_future.flatten();
+    /// assert_eq!(future.wait(), Ok(1));
+    /// ```
+    ///
+    /// Calling `flatten` on an errored `Future`, or if the inner `Future` is
+    /// errored, will result in an errored `Future`:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let nested_future = future::ok::<_, u32>(future::err::<u32, u32>(1));
+    /// let future = nested_future.flatten();
+    /// assert_eq!(future.wait(), Err(1));
+    /// ```
+    fn flatten(self) -> Flatten<Self>
+        where Self::Item: IntoFuture,
+        <<Self as Future>::Item as IntoFuture>::Error:
+            From<<Self as Future>::Error>,
+        Self: Sized
+    {
+        let f = flatten::new(self);
+        assert_future::<<<Self as Future>::Item as IntoFuture>::Item,
+                        <<Self as Future>::Item as IntoFuture>::Error,
+                        _>(f)
+    }
+
+    /// Flatten the execution of this future when the successful result of this
+    /// future is a stream.
+    ///
+    /// This can be useful when stream initialization is deferred, and it is
+    /// convenient to work with that stream as if it were available at the
+    /// call site.
+    ///
+    /// Note that this function consumes this future and returns a wrapped
+    /// version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    /// use futures::stream;
+    ///
+    /// let stream_items = vec![17, 18, 19];
+    /// let future_of_a_stream = future::ok::<_, bool>(stream::iter_ok(stream_items));
+    ///
+    /// let stream = future_of_a_stream.flatten_stream();
+    ///
+    /// let mut iter = stream.wait();
+    /// assert_eq!(Ok(17), iter.next().unwrap());
+    /// assert_eq!(Ok(18), iter.next().unwrap());
+    /// assert_eq!(Ok(19), iter.next().unwrap());
+    /// assert_eq!(None, iter.next());
+    /// ```
+    fn flatten_stream(self) -> FlattenStream<Self>
+        where <Self as Future>::Item: stream::Stream<Error=Self::Error>,
+              Self: Sized
+    {
+        flatten_stream::new(self)
+    }
+
+    /// Fuse a future such that `poll` will never again be called once it has
+    /// completed.
+    ///
+    /// Currently, once a future has returned `Ready` or `Err` from
+    /// `poll`, any further calls could exhibit bad behavior such as blocking
+    /// forever, panicking, never returning, etc. If it is known that `poll`
+    /// may be called too often then this method can be used to ensure that it
+    /// has defined semantics.
+    ///
+    /// Once a future has been `fuse`d and it returns a completion from `poll`,
+    /// then it will forever return `NotReady` from `poll` again (never
+    /// resolve).  This, unlike the trait's `poll` method, is guaranteed.
+    ///
+    /// This combinator will drop this future as soon as it's been completed to
+    /// ensure resources are reclaimed as soon as possible.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let mut future = future::ok::<i32, u32>(2);
+    /// assert_eq!(future.poll(), Ok(Async::Ready(2)));
+    ///
+    /// // Normally, a call such as this would panic:
+    /// //future.poll();
+    ///
+    /// // This, however, is guaranteed to not panic
+    /// let mut future = future::ok::<i32, u32>(2).fuse();
+    /// assert_eq!(future.poll(), Ok(Async::Ready(2)));
+    /// assert_eq!(future.poll(), Ok(Async::NotReady));
+    /// ```
+    fn fuse(self) -> Fuse<Self>
+        where Self: Sized
+    {
+        let f = fuse::new(self);
+        assert_future::<Self::Item, Self::Error, _>(f)
+    }
+
+    /// Do something with the item of a future, passing it on.
+    ///
+    /// When using futures, you'll often chain several of them together. While
+    /// working on such code, you might want to check what's happening at
+    /// various points in the pipeline. To do that, insert a call to `inspect()`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::ok::<u32, u32>(1);
+    /// let new_future = future.inspect(|&x| println!("about to resolve: {}", x));
+    /// assert_eq!(new_future.wait(), Ok(1));
+    /// ```
+    fn inspect<F>(self, f: F) -> Inspect<Self, F>
+        where F: FnOnce(&Self::Item) -> (),
+              Self: Sized,
+    {
+        assert_future::<Self::Item, Self::Error, _>(inspect::new(self, f))
+    }
+
+    /// Catches unwinding panics while polling the future.
+    ///
+    /// In general, panics within a future can propagate all the way out to the
+    /// task level. This combinator makes it possible to halt unwinding within
+    /// the future itself. It's most commonly used within task executors. It's
+    /// not recommended to use this for error handling.
+    ///
+    /// Note that this method requires the `UnwindSafe` bound from the standard
+    /// library. This isn't always applied automatically, and the standard
+    /// library provides an `AssertUnwindSafe` wrapper type to apply it
+    /// after the fact. To assist using this method, the `Future` trait is also
+    /// implemented for `AssertUnwindSafe<F>` where `F` implements `Future`.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use futures::prelude::*;
+    /// use futures::future::{self, FutureResult};
+    ///
+    /// let mut future = future::ok::<i32, u32>(2);
+    /// assert!(future.catch_unwind().wait().is_ok());
+    ///
+    /// let mut future = future::lazy(|| -> FutureResult<i32, u32> {
+    ///     panic!();
+    ///     future::ok::<i32, u32>(2)
+    /// });
+    /// assert!(future.catch_unwind().wait().is_err());
+    /// ```
+    #[cfg(feature = "use_std")]
+    fn catch_unwind(self) -> CatchUnwind<Self>
+        where Self: Sized + ::std::panic::UnwindSafe
+    {
+        catch_unwind::new(self)
+    }
+
+    /// Create a cloneable handle to this future where all handles will resolve
+    /// to the same result.
+    ///
+    /// The `shared()` method converts any future into a cloneable future. It
+    /// enables a future to be polled by multiple threads.
+    ///
+    /// The returned `Shared` future resolves successfully with
+    /// `SharedItem<Self::Item>` or erroneously with `SharedError<Self::Error>`.
+    /// Both `SharedItem` and `SharedError` implement `Deref` to allow shared
+    /// access to the underlying result. Ownership of `Self::Item` and
+    /// `Self::Error` cannot currently be reclaimed.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::ok::<_, bool>(6);
+    /// let shared1 = future.shared();
+    /// let shared2 = shared1.clone();
+    /// assert_eq!(6, *shared1.wait().unwrap());
+    /// assert_eq!(6, *shared2.wait().unwrap());
+    /// ```
+    ///
+    /// ```
+    /// use std::thread;
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::ok::<_, bool>(6);
+    /// let shared1 = future.shared();
+    /// let shared2 = shared1.clone();
+    /// let join_handle = thread::spawn(move || {
+    ///     assert_eq!(6, *shared2.wait().unwrap());
+    /// });
+    /// assert_eq!(6, *shared1.wait().unwrap());
+    /// join_handle.join().unwrap();
+    /// ```
+    #[cfg(feature = "use_std")]
+    fn shared(self) -> Shared<Self>
+        where Self: Sized
+    {
+        shared::new(self)
+    }
+}
+
+impl<'a, F: ?Sized + Future> Future for &'a mut F {
+    type Item = F::Item;
+    type Error = F::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        (**self).poll()
+    }
+}
+
+// Just a helper function to ensure the futures we're returning all have the
+// right implementations.
+fn assert_future<A, B, F>(t: F) -> F
+    where F: Future<Item=A, Error=B>,
+{
+    t
+}
+
+/// Class of types which can be converted into a future.
+///
+/// This trait is very similar to the `IntoIterator` trait and is intended to be
+/// used in a very similar fashion.
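+///
+/// # Examples
+///
+/// A minimal sketch, relying on the `IntoFuture` implementation for `Result`
+/// provided below:
+///
+/// ```
+/// use futures::prelude::*;
+///
+/// let future = Ok::<u32, ()>(1).into_future();
+/// assert_eq!(future.wait(), Ok(1));
+/// ```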
+pub trait IntoFuture {
+    /// The future that this type can be converted into.
+    type Future: Future<Item=Self::Item, Error=Self::Error>;
+
+    /// The item that the future may resolve with.
+    type Item;
+    /// The error that the future may resolve with.
+    type Error;
+
+    /// Consumes this object and produces a future.
+    fn into_future(self) -> Self::Future;
+}
+
+impl<F: Future> IntoFuture for F {
+    type Future = F;
+    type Item = F::Item;
+    type Error = F::Error;
+
+    fn into_future(self) -> F {
+        self
+    }
+}
+
+impl<T, E> IntoFuture for result::Result<T, E> {
+    type Future = FutureResult<T, E>;
+    type Item = T;
+    type Error = E;
+
+    fn into_future(self) -> FutureResult<T, E> {
+        result(self)
+    }
+}
+
+/// Asynchronous conversion from a type `T`.
+///
+/// This trait is analogous to `std::convert::From`, adapted to asynchronous
+/// computation.
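+///
+/// # Examples
+///
+/// A minimal sketch of an implementation that converts synchronously by
+/// returning an already-completed future (the `Fahrenheit` and `Celsius`
+/// types below are purely illustrative):
+///
+/// ```
+/// use futures::prelude::*;
+/// use futures::future::{self, FutureFrom, FutureResult};
+///
+/// struct Fahrenheit(f64);
+/// struct Celsius(f64);
+///
+/// impl FutureFrom<Fahrenheit> for Celsius {
+///     type Future = FutureResult<Celsius, ()>;
+///     type Error = ();
+///
+///     fn future_from(f: Fahrenheit) -> Self::Future {
+///         // The conversion itself is synchronous, so hand back a ready future.
+///         future::ok(Celsius((f.0 - 32.0) * 5.0 / 9.0))
+///     }
+/// }
+///
+/// let celsius = Celsius::future_from(Fahrenheit(212.0)).wait().unwrap();
+/// assert_eq!(celsius.0, 100.0);
+/// ```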
+pub trait FutureFrom<T>: Sized {
+    /// The future for the conversion.
+    type Future: Future<Item=Self, Error=Self::Error>;
+
+    /// Possible errors during conversion.
+    type Error;
+
+    /// Consume the given value, beginning the conversion.
+    fn future_from(T) -> Self::Future;
+}
+
+/// A trait for types which can spawn fresh futures.
+///
+/// This trait is typically implemented for "executors", or those types which
+/// can execute futures to completion. Futures passed to `Spawn::spawn`
+/// typically get turned into a *task* and are then driven to completion.
+///
+/// On spawn, the executor takes ownership of the future and becomes
+/// responsible for calling `Future::poll()` whenever a readiness notification
+/// is raised.
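+///
+/// # Examples
+///
+/// A minimal sketch of an executor that simply runs each spawned future to
+/// completion on the calling thread (`InlineExecutor` is an illustrative
+/// name, not a type provided by this crate):
+///
+/// ```
+/// use futures::prelude::*;
+/// use futures::future::{self, Executor, ExecuteError};
+///
+/// struct InlineExecutor;
+///
+/// impl<F> Executor<F> for InlineExecutor
+///     where F: Future<Item = (), Error = ()>
+/// {
+///     fn execute(&self, future: F) -> Result<(), ExecuteError<F>> {
+///         // Block the current thread until the future finishes.
+///         let _ = future.wait();
+///         Ok(())
+///     }
+/// }
+///
+/// let executor = InlineExecutor;
+/// executor.execute(future::ok::<(), ()>(())).unwrap();
+/// ```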
+pub trait Executor<F: Future<Item = (), Error = ()>> {
+    /// Spawns a future to run on this `Executor`, typically in the
+    /// "background".
+    ///
+    /// This function will return immediately, and schedule the future `future`
+    /// to run on `self`. The details of scheduling and execution are left to
+    /// the implementations of `Executor`, but this is typically a primary point
+    /// for injecting concurrency in a futures-based system. Futures spawned
+    /// through this `execute` function tend to run concurrently while they're
+    /// waiting on events.
+    ///
+    /// # Errors
+    ///
+    /// Implementers of this trait are allowed to reject this future as well.
+    /// This can happen for various reasons, such as:
+    ///
+    /// * The executor is shut down
+    /// * The executor has run out of capacity to execute futures
+    ///
+    /// How to handle this form of error is left to the caller. The error
+    /// returned transfers ownership of the future back to the caller.
+    fn execute(&self, future: F) -> Result<(), ExecuteError<F>>;
+}
+
+/// Errors returned from the `Executor::execute` function.
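+///
+/// A minimal sketch of recovering a rejected future (`my_executor` and
+/// `my_future` are illustrative placeholders):
+///
+/// ```ignore
+/// match my_executor.execute(my_future) {
+///     Ok(()) => {}
+///     Err(err) => {
+///         println!("spawn failed: {:?}", err.kind());
+///         // Ownership of the future is handed back to the caller.
+///         let my_future = err.into_future();
+///     }
+/// }
+/// ```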
+pub struct ExecuteError<F> {
+    future: F,
+    kind: ExecuteErrorKind,
+}
+
+/// Kinds of errors that can be returned from the `Executor::execute` function.
+///
+/// Executors which may not always be able to accept a future may return one of
+/// these errors, indicating why it was unable to spawn a future.
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub enum ExecuteErrorKind {
+    /// This executor has shut down and will no longer accept new futures to
+    /// spawn.
+    Shutdown,
+
+    /// This executor has no more capacity to run more futures. Other futures
+    /// need to finish before this executor can accept another.
+    NoCapacity,
+
+    #[doc(hidden)]
+    __Nonexhaustive,
+}
+
+impl<F> ExecuteError<F> {
+    /// Create a new `ExecuteError`
+    pub fn new(kind: ExecuteErrorKind, future: F) -> ExecuteError<F> {
+        ExecuteError {
+            future: future,
+            kind: kind,
+        }
+    }
+
+    /// Returns the associated reason for the error
+    pub fn kind(&self) -> ExecuteErrorKind {
+        self.kind
+    }
+
+    /// Consumes self and returns the original future that was spawned.
+    pub fn into_future(self) -> F {
+        self.future
+    }
+}
+
+impl<F> fmt::Debug for ExecuteError<F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.kind {
+            ExecuteErrorKind::Shutdown => "executor has shut down".fmt(f),
+            ExecuteErrorKind::NoCapacity => "executor has no more capacity".fmt(f),
+            ExecuteErrorKind::__Nonexhaustive => panic!(),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/option.rs b/rustc_deps/vendor/futures/src/future/option.rs
new file mode 100644
index 0000000..1b204d3
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/option.rs
@@ -0,0 +1,15 @@
+//! Definition of the `Option` (optional step) combinator
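+//!
+//! `Option<F>` implements `Future` when `F` does: `None` is treated as an
+//! immediately-ready `None` item, while `Some(future)` yields `Some` of the
+//! inner future's item. A minimal sketch:
+//!
+//! ```
+//! use futures::prelude::*;
+//! use futures::future::{self, FutureResult};
+//!
+//! let some = Some(future::ok::<u32, ()>(2));
+//! assert_eq!(some.wait(), Ok(Some(2)));
+//!
+//! let none: Option<FutureResult<u32, ()>> = None;
+//! assert_eq!(none.wait(), Ok(None));
+//! ```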
+
+use {Future, Poll, Async};
+
+impl<F, T, E> Future for Option<F> where F: Future<Item=T, Error=E> {
+    type Item = Option<T>;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        match *self {
+            None => Ok(Async::Ready(None)),
+            Some(ref mut x) => x.poll().map(|x| x.map(Some)),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/or_else.rs b/rustc_deps/vendor/futures/src/future/or_else.rs
new file mode 100644
index 0000000..bc13413
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/or_else.rs
@@ -0,0 +1,39 @@
+use {Future, IntoFuture, Poll};
+use super::chain::Chain;
+
+/// Future for the `or_else` combinator, chaining a computation onto the end of
+/// a future which fails with an error.
+///
+/// This is created by the `Future::or_else` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct OrElse<A, B, F> where A: Future, B: IntoFuture {
+    state: Chain<A, B::Future, F>,
+}
+
+pub fn new<A, B, F>(future: A, f: F) -> OrElse<A, B, F>
+    where A: Future,
+          B: IntoFuture<Item=A::Item>,
+{
+    OrElse {
+        state: Chain::new(future, f),
+    }
+}
+
+impl<A, B, F> Future for OrElse<A, B, F>
+    where A: Future,
+          B: IntoFuture<Item=A::Item>,
+          F: FnOnce(A::Error) -> B,
+{
+    type Item = B::Item;
+    type Error = B::Error;
+
+    fn poll(&mut self) -> Poll<B::Item, B::Error> {
+        self.state.poll(|a, f| {
+            match a {
+                Ok(item) => Ok(Ok(item)),
+                Err(e) => Ok(Err(f(e).into_future()))
+            }
+        })
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/poll_fn.rs b/rustc_deps/vendor/futures/src/future/poll_fn.rs
new file mode 100644
index 0000000..d96bf2f
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/poll_fn.rs
@@ -0,0 +1,45 @@
+//! Definition of the `PollFn` adapter combinator
+
+use {Future, Poll};
+
+/// A future which adapts a function returning `Poll`.
+///
+/// Created by the `poll_fn` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct PollFn<F> {
+    inner: F,
+}
+
+/// Creates a new future wrapping around a function returning `Poll`.
+///
+/// Polling the returned future delegates to the wrapped function.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::poll_fn;
+/// use futures::{Async, Poll};
+///
+/// fn read_line() -> Poll<String, std::io::Error> {
+///     Ok(Async::Ready("Hello, World!".into()))
+/// }
+///
+/// let read_future = poll_fn(read_line);
+/// ```
+pub fn poll_fn<T, E, F>(f: F) -> PollFn<F>
+    where F: FnMut() -> ::Poll<T, E>
+{
+    PollFn { inner: f }
+}
+
+impl<T, E, F> Future for PollFn<F>
+    where F: FnMut() -> Poll<T, E>
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        (self.inner)()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/result.rs b/rustc_deps/vendor/futures/src/future/result.rs
new file mode 100644
index 0000000..5c44a63
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/result.rs
@@ -0,0 +1,81 @@
+//! Definition of the `Result` (immediately finished) combinator
+
+use core::result;
+
+use {Future, Poll, Async};
+
+/// A future representing a value that is immediately ready.
+///
+/// Created by the `result` function.
+#[derive(Debug, Clone)]
+#[must_use = "futures do nothing unless polled"]
+// TODO: rename this to `Result` on the next major version
+pub struct FutureResult<T, E> {
+    inner: Option<result::Result<T, E>>,
+}
+
+/// Creates a new "leaf future" which will resolve with the given result.
+///
+/// The returned future represents a computation which is finished immediately.
+/// This can be useful with the `finished` and `failed` base future types to
+/// convert an immediate value to a future to interoperate elsewhere.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let future_of_1 = result::<u32, u32>(Ok(1));
+/// let future_of_err_2 = result::<u32, u32>(Err(2));
+/// ```
+pub fn result<T, E>(r: result::Result<T, E>) -> FutureResult<T, E> {
+    FutureResult { inner: Some(r) }
+}
+
+/// Creates a "leaf future" from an immediate value of a finished and
+/// successful computation.
+///
+/// The returned future is similar to `result` where it will immediately run a
+/// scheduled callback with the provided value.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let future_of_1 = ok::<u32, u32>(1);
+/// ```
+pub fn ok<T, E>(t: T) -> FutureResult<T, E> {
+    result(Ok(t))
+}
+
+/// Creates a "leaf future" from an immediate value of a failed computation.
+///
+/// The returned future is similar to `result` where it will immediately run a
+/// scheduled callback with the provided value.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let future_of_err_1 = err::<u32, u32>(1);
+/// ```
+pub fn err<T, E>(e: E) -> FutureResult<T, E> {
+    result(Err(e))
+}
+
+impl<T, E> Future for FutureResult<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        self.inner.take().expect("cannot poll Result twice").map(Async::Ready)
+    }
+}
+
+impl<T, E> From<Result<T, E>> for FutureResult<T, E> {
+    fn from(r: Result<T, E>) -> Self {
+        result(r)
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/select.rs b/rustc_deps/vendor/futures/src/future/select.rs
new file mode 100644
index 0000000..c48e1c0
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/select.rs
@@ -0,0 +1,86 @@
+use {Future, Poll, Async};
+
+/// Future for the `select` combinator, waiting for one of two futures to
+/// complete.
+///
+/// This is created by the `Future::select` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Select<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> {
+    inner: Option<(A, B)>,
+}
+
+/// Future yielded as the second result in a `Select` future.
+///
+/// This sentinel future represents the eventual completion of whichever future
+/// passed to `select` did not finish first.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SelectNext<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> {
+    inner: OneOf<A, B>,
+}
+
+#[derive(Debug)]
+enum OneOf<A, B> where A: Future, B: Future {
+    A(A),
+    B(B),
+}
+
+pub fn new<A, B>(a: A, b: B) -> Select<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>
+{
+    Select {
+        inner: Some((a, b)),
+    }
+}
+
+impl<A, B> Future for Select<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>,
+{
+    type Item = (A::Item, SelectNext<A, B>);
+    type Error = (A::Error, SelectNext<A, B>);
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        let (ret, is_a) = match self.inner {
+            Some((ref mut a, ref mut b)) => {
+                match a.poll() {
+                    Err(a) => (Err(a), true),
+                    Ok(Async::Ready(a)) => (Ok(a), true),
+                    Ok(Async::NotReady) => {
+                        match b.poll() {
+                            Err(a) => (Err(a), false),
+                            Ok(Async::Ready(a)) => (Ok(a), false),
+                            Ok(Async::NotReady) => return Ok(Async::NotReady),
+                        }
+                    }
+                }
+            }
+            None => panic!("cannot poll select twice"),
+        };
+
+        let (a, b) = self.inner.take().unwrap();
+        let next = if is_a {OneOf::B(b)} else {OneOf::A(a)};
+        let next = SelectNext { inner: next };
+        match ret {
+            Ok(a) => Ok(Async::Ready((a, next))),
+            Err(e) => Err((e, next)),
+        }
+    }
+}
+
+impl<A, B> Future for SelectNext<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>,
+{
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        match self.inner {
+            OneOf::A(ref mut a) => a.poll(),
+            OneOf::B(ref mut b) => b.poll(),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/select2.rs b/rustc_deps/vendor/futures/src/future/select2.rs
new file mode 100644
index 0000000..073f67b
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/select2.rs
@@ -0,0 +1,39 @@
+use {Future, Poll, Async};
+use future::Either;
+
+/// Future for the `select2` combinator, waiting for one of two differently-typed
+/// futures to complete.
+///
+/// This is created by the [`Future::select2`] method.
+///
+/// [`Future::select2`]: trait.Future.html#method.select2
+#[must_use = "futures do nothing unless polled"]
+#[derive(Debug)]
+pub struct Select2<A, B> {
+    inner: Option<(A, B)>,
+}
+
+pub fn new<A, B>(a: A, b: B) -> Select2<A, B> {
+    Select2 { inner: Some((a, b)) }
+}
+
+impl<A, B> Future for Select2<A, B> where A: Future, B: Future {
+    type Item = Either<(A::Item, B), (B::Item, A)>;
+    type Error = Either<(A::Error, B), (B::Error, A)>;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        let (mut a, mut b) = self.inner.take().expect("cannot poll Select2 twice");
+        match a.poll() {
+            Err(e) => Err(Either::A((e, b))),
+            Ok(Async::Ready(x)) => Ok(Async::Ready(Either::A((x, b)))),
+            Ok(Async::NotReady) => match b.poll() {
+                Err(e) => Err(Either::B((e, a))),
+                Ok(Async::Ready(x)) => Ok(Async::Ready(Either::B((x, a)))),
+                Ok(Async::NotReady) => {
+                    self.inner = Some((a, b));
+                    Ok(Async::NotReady)
+                }
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/select_all.rs b/rustc_deps/vendor/futures/src/future/select_all.rs
new file mode 100644
index 0000000..1fbc986
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/select_all.rs
@@ -0,0 +1,71 @@
+//! Definition of the `SelectAll` combinator, finding the first future in a
+//! list that finishes.
+
+use std::mem;
+use std::prelude::v1::*;
+
+use {Future, IntoFuture, Poll, Async};
+
+/// Future for the `select_all` combinator, waiting for any one of a list of
+/// futures to complete.
+///
+/// This is created by the `select_all` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SelectAll<A> where A: Future {
+    inner: Vec<A>,
+}
+
+#[doc(hidden)]
+pub type SelectAllNext<A> = A;
+
+/// Creates a new future which will select over a list of futures.
+///
+/// The returned future will wait for any future within `iter` to be ready. Upon
+/// completion or failure, the resolved item will be returned, along with the
+/// index of the future that was ready and the list of all the remaining
+/// futures.
+///
+/// # Panics
+///
+/// This function will panic if the iterator specified contains no items.
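+///
+/// # Examples
+///
+/// A minimal sketch with immediately-ready futures; since the futures are
+/// polled in order, the first one (index 0) is selected:
+///
+/// ```
+/// use futures::prelude::*;
+/// use futures::future::{self, select_all};
+///
+/// let futures = vec![
+///     future::ok::<u32, u32>(1),
+///     future::ok::<u32, u32>(2),
+/// ];
+///
+/// let (item, index, _remaining) = select_all(futures).wait().ok().unwrap();
+/// assert_eq!(item, 1);
+/// assert_eq!(index, 0);
+/// ```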
+pub fn select_all<I>(iter: I) -> SelectAll<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+{
+    let ret = SelectAll {
+        inner: iter.into_iter()
+                   .map(|a| a.into_future())
+                   .collect(),
+    };
+    assert!(ret.inner.len() > 0);
+    ret
+}
+
+impl<A> Future for SelectAll<A>
+    where A: Future,
+{
+    type Item = (A::Item, usize, Vec<A>);
+    type Error = (A::Error, usize, Vec<A>);
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| {
+            match f.poll() {
+                Ok(Async::NotReady) => None,
+                Ok(Async::Ready(e)) => Some((i, Ok(e))),
+                Err(e) => Some((i, Err(e))),
+            }
+        }).next();
+        match item {
+            Some((idx, res)) => {
+                self.inner.remove(idx);
+                let rest = mem::replace(&mut self.inner, Vec::new());
+                match res {
+                    Ok(e) => Ok(Async::Ready((e, idx, rest))),
+                    Err(e) => Err((e, idx, rest)),
+                }
+            }
+            None => Ok(Async::NotReady),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/select_ok.rs b/rustc_deps/vendor/futures/src/future/select_ok.rs
new file mode 100644
index 0000000..f122a0e
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/select_ok.rs
@@ -0,0 +1,81 @@
+//! Definition of the `SelectOk` combinator, finding the first successful future
+//! in a list.
+
+use std::mem;
+use std::prelude::v1::*;
+
+use {Future, IntoFuture, Poll, Async};
+
+/// Future for the `select_ok` combinator, waiting for any one of a list of
+/// futures to complete successfully. Unlike `select_all`, this future ignores all
+/// but the last error, if there are any.
+///
+/// This is created by the `select_ok` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SelectOk<A> where A: Future {
+    inner: Vec<A>,
+}
+
+/// Creates a new future which will select the first successful future over a list of futures.
+///
+/// The returned future will wait for any future within `iter` to be ready and Ok. Unlike
+/// `select_all`, this will only return the first successful completion, or the last
+/// failure. This is useful in contexts where any success is desired and failures
+/// are ignored, unless all the futures fail.
+///
+/// # Panics
+///
+/// This function will panic if the iterator specified contains no items.
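+///
+/// # Examples
+///
+/// A minimal sketch: the early error is discarded and the first success is
+/// returned.
+///
+/// ```
+/// use futures::prelude::*;
+/// use futures::future::{self, select_ok};
+///
+/// let futures = vec![
+///     future::err::<u32, u32>(1),
+///     future::ok::<u32, u32>(2),
+/// ];
+///
+/// let (item, _remaining) = select_ok(futures).wait().unwrap();
+/// assert_eq!(item, 2);
+/// ```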
+pub fn select_ok<I>(iter: I) -> SelectOk<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+{
+    let ret = SelectOk {
+        inner: iter.into_iter()
+                   .map(|a| a.into_future())
+                   .collect(),
+    };
+    assert!(ret.inner.len() > 0);
+    ret
+}
+
+impl<A> Future for SelectOk<A> where A: Future {
+    type Item = (A::Item, Vec<A>);
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        // loop until we've either exhausted all errors, a success was hit, or nothing is ready
+        loop {
+            let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| {
+                match f.poll() {
+                    Ok(Async::NotReady) => None,
+                    Ok(Async::Ready(e)) => Some((i, Ok(e))),
+                    Err(e) => Some((i, Err(e))),
+                }
+            }).next();
+
+            match item {
+                Some((idx, res)) => {
+                    // always remove Ok or Err, if it's not the last Err continue looping
+                    drop(self.inner.remove(idx));
+                    match res {
+                        Ok(e) => {
+                            let rest = mem::replace(&mut self.inner, Vec::new());
+                            return Ok(Async::Ready((e, rest)))
+                        },
+                        Err(e) => {
+                            if self.inner.is_empty() {
+                                return Err(e)
+                            }
+                        },
+                    }
+                }
+                None => {
+                    // based on the filter above, nothing is ready, return
+                    return Ok(Async::NotReady)
+                },
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/shared.rs b/rustc_deps/vendor/futures/src/future/shared.rs
new file mode 100644
index 0000000..25417ef
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/shared.rs
@@ -0,0 +1,312 @@
+//! Definition of the `Shared` combinator, a future that is cloneable and can
+//! be polled in multiple threads.
+//!
+//! # Examples
+//!
+//! ```
+//! use futures::future::*;
+//!
+//! let future = ok::<_, bool>(6);
+//! let shared1 = future.shared();
+//! let shared2 = shared1.clone();
+//! assert_eq!(6, *shared1.wait().unwrap());
+//! assert_eq!(6, *shared2.wait().unwrap());
+//! ```
+
+use {Future, Poll, Async};
+use task::{self, Task};
+use executor::{self, Notify, Spawn};
+
+use std::{error, fmt, mem, ops};
+use std::cell::UnsafeCell;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+use std::collections::HashMap;
+
+/// A future that is cloneable and can be polled in multiple threads.
+/// Use the `Future::shared()` method to convert any future into a `Shared` future.
+#[must_use = "futures do nothing unless polled"]
+pub struct Shared<F: Future> {
+    inner: Arc<Inner<F>>,
+    waiter: usize,
+}
+
+impl<F> fmt::Debug for Shared<F>
+    where F: Future + fmt::Debug,
+          F::Item: fmt::Debug,
+          F::Error: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Shared")
+            .field("inner", &self.inner)
+            .field("waiter", &self.waiter)
+            .finish()
+    }
+}
+
+struct Inner<F: Future> {
+    next_clone_id: AtomicUsize,
+    future: UnsafeCell<Option<Spawn<F>>>,
+    result: UnsafeCell<Option<Result<SharedItem<F::Item>, SharedError<F::Error>>>>,
+    notifier: Arc<Notifier>,
+}
+
+struct Notifier {
+    state: AtomicUsize,
+    waiters: Mutex<HashMap<usize, Task>>,
+}
+
+const IDLE: usize = 0;
+const POLLING: usize = 1;
+const REPOLL: usize = 2;
+const COMPLETE: usize = 3;
+const POISONED: usize = 4;
+
+pub fn new<F: Future>(future: F) -> Shared<F> {
+    Shared {
+        inner: Arc::new(Inner {
+            next_clone_id: AtomicUsize::new(1),
+            notifier: Arc::new(Notifier {
+                state: AtomicUsize::new(IDLE),
+                waiters: Mutex::new(HashMap::new()),
+            }),
+            future: UnsafeCell::new(Some(executor::spawn(future))),
+            result: UnsafeCell::new(None),
+        }),
+        waiter: 0,
+    }
+}
+
+impl<F> Shared<F> where F: Future {
+    // TODO: make this private
+    #[deprecated(since = "0.1.12", note = "use `Future::shared` instead")]
+    #[cfg(feature = "with-deprecated")]
+    #[doc(hidden)]
+    pub fn new(future: F) -> Self {
+        new(future)
+    }
+
+    /// If any clone of this `Shared` has completed execution, returns its result immediately
+    /// without blocking. Otherwise, returns None without triggering the work represented by
+    /// this `Shared`.
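+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch: before any clone has been driven to completion,
+    /// `peek` returns `None`; afterwards it returns the shared result.
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let shared = future::ok::<u32, ()>(6).shared();
+    /// assert!(shared.peek().is_none());
+    ///
+    /// shared.clone().wait().unwrap();
+    /// assert_eq!(6, *shared.peek().unwrap().unwrap());
+    /// ```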
+    pub fn peek(&self) -> Option<Result<SharedItem<F::Item>, SharedError<F::Error>>> {
+        match self.inner.notifier.state.load(SeqCst) {
+            COMPLETE => {
+                Some(unsafe { self.clone_result() })
+            }
+            POISONED => panic!("inner future panicked during poll"),
+            _ => None,
+        }
+    }
+
+    fn set_waiter(&mut self) {
+        let mut waiters = self.inner.notifier.waiters.lock().unwrap();
+        waiters.insert(self.waiter, task::current());
+    }
+
+    unsafe fn clone_result(&self) -> Result<SharedItem<F::Item>, SharedError<F::Error>> {
+        match *self.inner.result.get() {
+            Some(Ok(ref item)) => Ok(SharedItem { item: item.item.clone() }),
+            Some(Err(ref e)) => Err(SharedError { error: e.error.clone() }),
+            _ => unreachable!(),
+        }
+    }
+
+    fn complete(&self) {
+        unsafe { *self.inner.future.get() = None };
+        self.inner.notifier.state.store(COMPLETE, SeqCst);
+        self.inner.notifier.notify(0);
+    }
+}
+
+impl<F> Future for Shared<F>
+    where F: Future
+{
+    type Item = SharedItem<F::Item>;
+    type Error = SharedError<F::Error>;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        self.set_waiter();
+
+        match self.inner.notifier.state.compare_and_swap(IDLE, POLLING, SeqCst) {
+            IDLE => {
+                // Lock acquired, fall through
+            }
+            POLLING | REPOLL => {
+                // Another task is currently polling, at this point we just want
+                // to ensure that our task handle is currently registered
+
+                return Ok(Async::NotReady);
+            }
+            COMPLETE => {
+                return unsafe { self.clone_result().map(Async::Ready) };
+            }
+            POISONED => panic!("inner future panicked during poll"),
+            _ => unreachable!(),
+        }
+
+        loop {
+            struct Reset<'a>(&'a AtomicUsize);
+
+            impl<'a> Drop for Reset<'a> {
+                fn drop(&mut self) {
+                    use std::thread;
+
+                    if thread::panicking() {
+                        self.0.store(POISONED, SeqCst);
+                    }
+                }
+            }
+
+            let _reset = Reset(&self.inner.notifier.state);
+
+            // Poll the future
+            let res = unsafe {
+                (*self.inner.future.get()).as_mut().unwrap()
+                    .poll_future_notify(&self.inner.notifier, 0)
+            };
+            match res {
+                Ok(Async::NotReady) => {
+                    // Not ready, try to release the handle
+                    match self.inner.notifier.state.compare_and_swap(POLLING, IDLE, SeqCst) {
+                        POLLING => {
+                            // Success
+                            return Ok(Async::NotReady);
+                        }
+                        REPOLL => {
+                            // Gotta poll again!
+                            let prev = self.inner.notifier.state.swap(POLLING, SeqCst);
+                            assert_eq!(prev, REPOLL);
+                        }
+                        _ => unreachable!(),
+                    }
+
+                }
+                Ok(Async::Ready(i)) => {
+                    unsafe {
+                        (*self.inner.result.get()) = Some(Ok(SharedItem { item: Arc::new(i) }));
+                    }
+
+                    break;
+                }
+                Err(e) => {
+                    unsafe {
+                        (*self.inner.result.get()) = Some(Err(SharedError { error: Arc::new(e) }));
+                    }
+
+                    break;
+                }
+            }
+        }
+
+        self.complete();
+        unsafe { self.clone_result().map(Async::Ready) }
+    }
+}
+
+impl<F> Clone for Shared<F> where F: Future {
+    fn clone(&self) -> Self {
+        let next_clone_id = self.inner.next_clone_id.fetch_add(1, SeqCst);
+
+        Shared {
+            inner: self.inner.clone(),
+            waiter: next_clone_id,
+        }
+    }
+}
+
+impl<F> Drop for Shared<F> where F: Future {
+    fn drop(&mut self) {
+        let mut waiters = self.inner.notifier.waiters.lock().unwrap();
+        waiters.remove(&self.waiter);
+    }
+}
+
+impl Notify for Notifier {
+    fn notify(&self, _id: usize) {
+        self.state.compare_and_swap(POLLING, REPOLL, SeqCst);
+
+        let waiters = mem::replace(&mut *self.waiters.lock().unwrap(), HashMap::new());
+
+        for (_, waiter) in waiters {
+            waiter.notify();
+        }
+    }
+}
+
+// The `F` is synchronized by a lock, so `F` doesn't need
+// to be `Sync`. However, its `Item` or `Error` are exposed
+// through an `Arc` but not lock, so they must be `Send + Sync`.
+unsafe impl<F> Send for Inner<F>
+    where F: Future + Send,
+          F::Item: Send + Sync,
+          F::Error: Send + Sync,
+{}
+
+unsafe impl<F> Sync for Inner<F>
+    where F: Future + Send,
+          F::Item: Send + Sync,
+          F::Error: Send + Sync,
+{}
+
+impl<F> fmt::Debug for Inner<F>
+    where F: Future + fmt::Debug,
+          F::Item: fmt::Debug,
+          F::Error: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Inner")
+            .finish()
+    }
+}
+
+/// A wrapped item of the original future that is cloneable and implements `Deref`
+/// for ease of use.
+#[derive(Clone, Debug)]
+pub struct SharedItem<T> {
+    item: Arc<T>,
+}
+
+impl<T> ops::Deref for SharedItem<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.item.as_ref()
+    }
+}
+
+/// A wrapped error of the original future that is cloneable and implements `Deref`
+/// for ease of use.
+#[derive(Clone, Debug)]
+pub struct SharedError<E> {
+    error: Arc<E>,
+}
+
+impl<E> ops::Deref for SharedError<E> {
+    type Target = E;
+
+    fn deref(&self) -> &E {
+        &self.error.as_ref()
+    }
+}
+
+impl<E> fmt::Display for SharedError<E>
+    where E: fmt::Display,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.error.fmt(f)
+    }
+}
+
+impl<E> error::Error for SharedError<E>
+    where E: error::Error,
+{
+    fn description(&self) -> &str {
+        self.error.description()
+    }
+
+    fn cause(&self) -> Option<&error::Error> {
+        self.error.cause()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/future/then.rs b/rustc_deps/vendor/futures/src/future/then.rs
new file mode 100644
index 0000000..188fb8f
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/future/then.rs
@@ -0,0 +1,36 @@
+use {Future, IntoFuture, Poll};
+use super::chain::Chain;
+
+/// Future for the `then` combinator, chaining computations on the end of
+/// another future regardless of its outcome.
+///
+/// This is created by the `Future::then` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Then<A, B, F> where A: Future, B: IntoFuture {
+    state: Chain<A, B::Future, F>,
+}
+
+pub fn new<A, B, F>(future: A, f: F) -> Then<A, B, F>
+    where A: Future,
+          B: IntoFuture,
+{
+    Then {
+        state: Chain::new(future, f),
+    }
+}
+
+impl<A, B, F> Future for Then<A, B, F>
+    where A: Future,
+          B: IntoFuture,
+          F: FnOnce(Result<A::Item, A::Error>) -> B,
+{
+    type Item = B::Item;
+    type Error = B::Error;
+
+    fn poll(&mut self) -> Poll<B::Item, B::Error> {
+        self.state.poll(|a, f| {
+            Ok(Err(f(a).into_future()))
+        })
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/lib.rs b/rustc_deps/vendor/futures/src/lib.rs
new file mode 100644
index 0000000..9a72090
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/lib.rs
@@ -0,0 +1,266 @@
+//! Zero-cost Futures in Rust
+//!
+//! This library is an implementation of futures in Rust which aims to provide
+//! a robust implementation of handling asynchronous computations, ergonomic
+//! composition and usage, and zero-cost abstractions over what would otherwise
+//! be written by hand.
+//!
+//! Futures are a concept for an object which is a proxy for another value that
+//! may not be ready yet. For example, issuing an HTTP request may return a
+//! future for the HTTP response, as it probably hasn't arrived yet. With an
+//! object representing a value that will eventually be available, futures allow
+//! for powerful composition of tasks through basic combinators that can perform
+//! operations like chaining computations, changing the types of futures, or
+//! waiting for two futures to complete at the same time.
+//!
+//! You can find extensive tutorials and documentation at [https://tokio.rs],
+//! both for this crate (and asynchronous programming in general) and for the
+//! Tokio stack for performing async I/O.
+//!
+//! [https://tokio.rs]: https://tokio.rs
+//!
+//! ## Installation
+//!
+//! Add this to your `Cargo.toml`:
+//!
+//! ```toml
+//! [dependencies]
+//! futures = "0.1"
+//! ```
+//!
+//! ## Examples
+//!
+//! Let's take a look at a few examples of how futures might be used:
+//!
+//! ```
+//! extern crate futures;
+//!
+//! use std::io;
+//! use std::time::Duration;
+//! use futures::prelude::*;
+//! use futures::future::Map;
+//!
+//! // A future is actually a trait implementation, so we can generically take a
+//! // future of any integer and return back a future that will resolve to that
+//! // value plus 10 more.
+//! //
+//! // Note here that like iterators, we're returning the `Map` combinator in
+//! // the futures crate, not a boxed abstraction. This is a zero-cost
+//! // construction of a future.
+//! fn add_ten<F>(future: F) -> Map<F, fn(i32) -> i32>
+//!     where F: Future<Item=i32>,
+//! {
+//!     fn add(a: i32) -> i32 { a + 10 }
+//!     future.map(add)
+//! }
+//!
+//! // Not only can we modify one future, but we can even compose them together!
+//! // Here we have a function which takes two futures as input, and returns a
+//! // future that will calculate the sum of their two values.
+//! //
+//! // Above we saw a direct return value of the `Map` combinator, but
+//! // performance isn't always critical and sometimes it's more ergonomic to
+//! // return a trait object like we do here. Note though that there's only one
+//! // allocation here, not any for the intermediate futures.
+//! fn add<'a, A, B>(a: A, b: B) -> Box<Future<Item=i32, Error=A::Error> + 'a>
+//!     where A: Future<Item=i32> + 'a,
+//!           B: Future<Item=i32, Error=A::Error> + 'a,
+//! {
+//!     Box::new(a.join(b).map(|(a, b)| a + b))
+//! }
+//!
+//! // Futures also allow chaining computations together, starting another after
+//! // the previous finishes. Here we wait for the first computation to finish,
+//! // and then decide what to do depending on the result.
+//! fn download_timeout(url: &str,
+//!                     timeout_dur: Duration)
+//!                     -> Box<Future<Item=Vec<u8>, Error=io::Error>> {
+//!     use std::io;
+//!     use std::net::{SocketAddr, TcpStream};
+//!
+//!     type IoFuture<T> = Box<Future<Item=T, Error=io::Error>>;
+//!
+//!     // First thing to do is we need to resolve our URL to an address. This
+//!     // will likely perform a DNS lookup which may take some time.
+//!     let addr = resolve(url);
+//!
+//!     // After we acquire the address, we next want to open up a TCP
+//!     // connection.
+//!     let tcp = addr.and_then(|addr| connect(&addr));
+//!
+//!     // After the TCP connection is established and ready to go, we're off to
+//!     // the races!
+//!     let data = tcp.and_then(|conn| download(conn));
+//!
+//!     // That all might take awhile, though, so let's not wait too long for it
+//!     // to all come back. The `select` combinator here returns a future which
+//!     // resolves to the first value that's ready plus the next future.
+//!     //
+//!     // Note we can also use the `then` combinator which is similar to
+//!     // `and_then` above except that it receives the result of the
+//!     // computation, not just the successful value.
+//!     //
+//!     // Again note that all the above calls to `and_then` and the below calls
+//!     // to `map` and such require no allocations. We only ever allocate once
+//!     // we hit the `Box::new()` call at the end here, which means we've built
+//!     // up a relatively involved computation with only one box, and even that
+//!     // was optional!
+//!
+//!     let data = data.map(Ok);
+//!     let timeout = timeout(timeout_dur).map(Err);
+//!
+//!     let ret = data.select(timeout).then(|result| {
+//!         match result {
+//!             // One future succeeded, and it was the one which was
+//!             // downloading data from the connection.
+//!             Ok((Ok(data), _other_future)) => Ok(data),
+//!
+//!             // The timeout fired, and otherwise no error was found, so
+//!             // we translate this to an error.
+//!             Ok((Err(_timeout), _other_future)) => {
+//!                 Err(io::Error::new(io::ErrorKind::Other, "timeout"))
+//!             }
+//!
+//!             // A normal I/O error happened, so we pass that on through.
+//!             Err((e, _other_future)) => Err(e),
+//!         }
+//!     });
+//!     return Box::new(ret);
+//!
+//!     fn resolve(url: &str) -> IoFuture<SocketAddr> {
+//!         // ...
+//! #       panic!("unimplemented");
+//!     }
+//!
+//!     fn connect(hostname: &SocketAddr) -> IoFuture<TcpStream> {
+//!         // ...
+//! #       panic!("unimplemented");
+//!     }
+//!
+//!     fn download(stream: TcpStream) -> IoFuture<Vec<u8>> {
+//!         // ...
+//! #       panic!("unimplemented");
+//!     }
+//!
+//!     fn timeout(stream: Duration) -> IoFuture<()> {
+//!         // ...
+//! #       panic!("unimplemented");
+//!     }
+//! }
+//! # fn main() {}
+//! ```
+//!
+//! Some more information can also be found in the [README] for now, but
+//! otherwise feel free to jump into the docs below!
+//!
+//! [README]: https://github.com/rust-lang-nursery/futures-rs#futures-rs
+
+#![no_std]
+#![deny(missing_docs, missing_debug_implementations)]
+#![doc(html_root_url = "https://docs.rs/futures/0.1")]
+#![cfg_attr(feature = "nightly", feature(pin))]
+
+#[macro_use]
+#[cfg(feature = "use_std")]
+extern crate std;
+
+macro_rules! if_std {
+    ($($i:item)*) => ($(
+        #[cfg(feature = "use_std")]
+        $i
+    )*)
+}
+
+#[macro_use]
+mod poll;
+pub use poll::{Poll, Async, AsyncSink, StartSend};
+
+pub mod future;
+pub use future::{Future, IntoFuture};
+
+pub mod stream;
+pub use stream::Stream;
+
+pub mod sink;
+pub use sink::Sink;
+
+#[deprecated(since = "0.1.4", note = "import through the future module instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use future::{done, empty, failed, finished, lazy};
+
+#[doc(hidden)]
+#[cfg(feature = "with-deprecated")]
+#[deprecated(since = "0.1.4", note = "import through the future module instead")]
+pub use future::{
+    Done, Empty, Failed, Finished, Lazy, AndThen, Flatten, FlattenStream, Fuse, IntoStream,
+    Join, Join3, Join4, Join5, Map, MapErr, OrElse, Select,
+    SelectNext, Then
+};
+
+#[cfg(feature = "use_std")]
+mod lock;
+mod task_impl;
+
+mod resultstream;
+
+pub mod task;
+pub mod executor;
+#[cfg(feature = "use_std")]
+pub mod sync;
+#[cfg(feature = "use_std")]
+pub mod unsync;
+
+
+if_std! {
+    #[doc(hidden)]
+    #[deprecated(since = "0.1.4", note = "use sync::oneshot::channel instead")]
+    #[cfg(feature = "with-deprecated")]
+    pub use sync::oneshot::channel as oneshot;
+
+    #[doc(hidden)]
+    #[deprecated(since = "0.1.4", note = "use sync::oneshot::Receiver instead")]
+    #[cfg(feature = "with-deprecated")]
+    pub use sync::oneshot::Receiver as Oneshot;
+
+    #[doc(hidden)]
+    #[deprecated(since = "0.1.4", note = "use sync::oneshot::Sender instead")]
+    #[cfg(feature = "with-deprecated")]
+    pub use sync::oneshot::Sender as Complete;
+
+    #[doc(hidden)]
+    #[deprecated(since = "0.1.4", note = "use sync::oneshot::Canceled instead")]
+    #[cfg(feature = "with-deprecated")]
+    pub use sync::oneshot::Canceled;
+
+    #[doc(hidden)]
+    #[deprecated(since = "0.1.4", note = "import through the future module instead")]
+    #[cfg(feature = "with-deprecated")]
+    #[allow(deprecated)]
+    pub use future::{BoxFuture, collect, select_all, select_ok};
+
+    #[doc(hidden)]
+    #[deprecated(since = "0.1.4", note = "import through the future module instead")]
+    #[cfg(feature = "with-deprecated")]
+    pub use future::{SelectAll, SelectAllNext, Collect, SelectOk};
+}
+
+/// A "prelude" for crates using the `futures` crate.
+///
+/// This prelude is similar to the standard library's prelude in that you'll
+/// almost always want to import its entire contents, but unlike the standard
+/// library's prelude you'll have to do so manually. An example of using this is:
+///
+/// ```
+/// use futures::prelude::*;
+/// ```
+///
+/// We may add items to this over time as they become ubiquitous as well, but
+/// otherwise this should help cut down on futures-related imports when you're
+/// working with the `futures` crate!
+pub mod prelude {
+    #[doc(no_inline)]
+    pub use {Future, Stream, Sink, Async, AsyncSink, Poll, StartSend};
+    #[doc(no_inline)]
+    pub use IntoFuture;
+}
diff --git a/rustc_deps/vendor/futures/src/lock.rs b/rustc_deps/vendor/futures/src/lock.rs
new file mode 100644
index 0000000..627c524
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/lock.rs
@@ -0,0 +1,107 @@
+//! A "mutex" which only supports `try_lock`
+//!
+//! In a futures library the eventual call into an event loop should be the only
+//! thing that ever blocks, so this module assists with a fast, user-space
+//! implementation of a lock that only supports a `try_lock` operation.
+
+extern crate core;
+
+use self::core::cell::UnsafeCell;
+use self::core::ops::{Deref, DerefMut};
+use self::core::sync::atomic::Ordering::SeqCst;
+use self::core::sync::atomic::AtomicBool;
+
+/// A "mutex" around a value, similar to `std::sync::Mutex<T>`.
+///
+/// This lock only supports the `try_lock` operation, however, and does not
+/// implement poisoning.
+#[derive(Debug)]
+pub struct Lock<T> {
+    locked: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+/// Sentinel representing an acquired lock through which the data can be
+/// accessed.
+pub struct TryLock<'a, T: 'a> {
+    __ptr: &'a Lock<T>,
+}
+
+// The `Lock` structure is basically just a `Mutex<T>`, and these two impls are
+// intended to mirror the standard library's corresponding impls for `Mutex<T>`.
+//
+// If a `T` is sendable across threads, so is the lock, and `T` must be sendable
+// across threads to be `Sync` because it allows mutable access from multiple
+// threads.
+unsafe impl<T: Send> Send for Lock<T> {}
+unsafe impl<T: Send> Sync for Lock<T> {}
+
+impl<T> Lock<T> {
+    /// Creates a new lock around the given value.
+    pub fn new(t: T) -> Lock<T> {
+        Lock {
+            locked: AtomicBool::new(false),
+            data: UnsafeCell::new(t),
+        }
+    }
+
+    /// Attempts to acquire this lock, returning whether the lock was acquired or
+    /// not.
+    ///
+    /// If `Some` is returned then the data this lock protects can be accessed
+    /// through the sentinel. This sentinel allows both mutable and immutable
+    /// access.
+    ///
+    /// If `None` is returned then the lock is already locked, either elsewhere
+    /// on this thread or on another thread.
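+    ///
+    /// A sketch of the expected usage (this type is internal to the crate, so
+    /// the example is not compiled):
+    ///
+    /// ```ignore
+    /// let lock = Lock::new(0);
+    /// if let Some(mut guard) = lock.try_lock() {
+    ///     *guard += 1;
+    /// } // the lock is released when `guard` goes out of scope
+    /// assert_eq!(*lock.try_lock().unwrap(), 1);
+    /// ```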
+    pub fn try_lock(&self) -> Option<TryLock<T>> {
+        if !self.locked.swap(true, SeqCst) {
+            Some(TryLock { __ptr: self })
+        } else {
+            None
+        }
+    }
+}
+
+impl<'a, T> Deref for TryLock<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        // The existence of `TryLock` represents that we own the lock, so we
+        // can safely access the data here.
+        unsafe { &*self.__ptr.data.get() }
+    }
+}
+
+impl<'a, T> DerefMut for TryLock<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // The existence of `TryLock` represents that we own the lock, so we
+        // can safely access the data here.
+        //
+        // Additionally, we're the *only* `TryLock` in existence so mutable
+        // access should be ok.
+        unsafe { &mut *self.__ptr.data.get() }
+    }
+}
+
+impl<'a, T> Drop for TryLock<'a, T> {
+    fn drop(&mut self) {
+        self.__ptr.locked.store(false, SeqCst);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Lock;
+
+    #[test]
+    fn smoke() {
+        let a = Lock::new(1);
+        let mut a1 = a.try_lock().unwrap();
+        assert!(a.try_lock().is_none());
+        assert_eq!(*a1, 1);
+        *a1 = 2;
+        drop(a1);
+        assert_eq!(*a.try_lock().unwrap(), 2);
+        assert_eq!(*a.try_lock().unwrap(), 2);
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/poll.rs b/rustc_deps/vendor/futures/src/poll.rs
new file mode 100644
index 0000000..c568e72
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/poll.rs
@@ -0,0 +1,105 @@
+/// A macro for extracting the successful type of a `Poll<T, E>`.
+///
+/// This macro bakes in propagation of both errors and `NotReady` signals by
+/// returning early.
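+///
+/// A small usage sketch; the wrapper future here is purely illustrative:
+///
+/// ```
+/// #[macro_use]
+/// extern crate futures;
+/// use futures::{Async, Future, Poll};
+/// use futures::future::FutureResult;
+///
+/// // A future that adds one to the output of an inner future.
+/// struct AddOne(FutureResult<u32, ()>);
+///
+/// impl Future for AddOne {
+///     type Item = u32;
+///     type Error = ();
+///
+///     fn poll(&mut self) -> Poll<u32, ()> {
+///         // Returns early on `NotReady` or `Err`, otherwise yields the value.
+///         let value = try_ready!(self.0.poll());
+///         Ok(Async::Ready(value + 1))
+///     }
+/// }
+/// # fn main() {}
+/// ```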
+#[macro_export]
+macro_rules! try_ready {
+    ($e:expr) => (match $e {
+        Ok($crate::Async::Ready(t)) => t,
+        Ok($crate::Async::NotReady) => return Ok($crate::Async::NotReady),
+        Err(e) => return Err(From::from(e)),
+    })
+}
+
+/// Return type of the `Future::poll` method, indicates whether a future's value
+/// is ready or not.
+///
+/// * `Ok(Async::Ready(t))` means that a future has successfully resolved
+/// * `Ok(Async::NotReady)` means that a future is not ready to complete yet
+/// * `Err(e)` means that a future has completed with the given failure
+pub type Poll<T, E> = Result<Async<T>, E>;
+
+/// Return type of future, indicating whether a value is ready or not.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum Async<T> {
+    /// Represents that a value is immediately ready.
+    Ready(T),
+
+    /// Represents that a value is not ready yet, but may be so later.
+    NotReady,
+}
+
+impl<T> Async<T> {
+    /// Change the success value of this `Async` with the closure provided
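+    ///
+    /// A small sketch:
+    ///
+    /// ```
+    /// use futures::Async;
+    ///
+    /// assert_eq!(Async::Ready(1).map(|n| n + 1), Async::Ready(2));
+    ///
+    /// let pending: Async<i32> = Async::NotReady;
+    /// assert_eq!(pending.map(|n| n + 1), Async::NotReady);
+    /// ```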
+    pub fn map<F, U>(self, f: F) -> Async<U>
+        where F: FnOnce(T) -> U
+    {
+        match self {
+            Async::Ready(t) => Async::Ready(f(t)),
+            Async::NotReady => Async::NotReady,
+        }
+    }
+
+    /// Returns whether this is `Async::Ready`
+    pub fn is_ready(&self) -> bool {
+        match *self {
+            Async::Ready(_) => true,
+            Async::NotReady => false,
+        }
+    }
+
+    /// Returns whether this is `Async::NotReady`
+    pub fn is_not_ready(&self) -> bool {
+        !self.is_ready()
+    }
+}
+
+impl<T> From<T> for Async<T> {
+    fn from(t: T) -> Async<T> {
+        Async::Ready(t)
+    }
+}
+
+/// The result of an asynchronous attempt to send a value to a sink.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum AsyncSink<T> {
+    /// The `start_send` attempt succeeded, so the sending process has
+    /// *started*; you must use `Sink::poll_complete` to drive the send
+    /// to completion.
+    Ready,
+
+    /// The `start_send` attempt failed due to the sink being full. The value
+    /// being sent is returned, and the current `Task` will be automatically
+    /// notified again once the sink has room.
+    NotReady(T),
+}
+
+impl<T> AsyncSink<T> {
+    /// Change the NotReady value of this `AsyncSink` with the closure provided
+    pub fn map<F, U>(self, f: F) -> AsyncSink<U>
+        where F: FnOnce(T) -> U,
+    {
+        match self {
+            AsyncSink::Ready => AsyncSink::Ready,
+            AsyncSink::NotReady(t) => AsyncSink::NotReady(f(t)),
+        }
+    }
+
+    /// Returns whether this is `AsyncSink::Ready`
+    pub fn is_ready(&self) -> bool {
+        match *self {
+            AsyncSink::Ready => true,
+            AsyncSink::NotReady(_) => false,
+        }
+    }
+
+    /// Returns whether this is `AsyncSink::NotReady`
+    pub fn is_not_ready(&self) -> bool {
+        !self.is_ready()
+    }
+}
+
+
+/// Return type of the `Sink::start_send` method, indicating the outcome of a
+/// send attempt. See `AsyncSink` for more details.
+pub type StartSend<T, E> = Result<AsyncSink<T>, E>;
diff --git a/rustc_deps/vendor/futures/src/resultstream.rs b/rustc_deps/vendor/futures/src/resultstream.rs
new file mode 100644
index 0000000..23a9981
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/resultstream.rs
@@ -0,0 +1,46 @@
+// This should really be in the stream module,
+// but `pub(crate)` isn't available until Rust 1.18,
+// and pre-1.18 there isn't a really good way to have a sub-module
+// available to the crate but not outside of it.
+use core::marker::PhantomData;
+
+use {Poll, Async};
+use stream::Stream;
+
+
+/// A stream combinator used to convert a `Stream<Item=T,Error=E>`
+/// to a `Stream<Item=Result<T,E>>`.
+///
+/// A poll on this stream will never return an `Err`. As such the
+/// actual error type is parameterized, so it can match whatever error
+/// type is needed.
+///
+/// This structure is produced by the `Stream::results` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Results<S: Stream, E> {
+    inner: S,
+    phantom: PhantomData<E>
+}
+
+pub fn new<S, E>(s: S) -> Results<S, E> where S: Stream {
+    Results {
+        inner: s,
+        phantom: PhantomData
+    }
+}
+
+impl<S: Stream, E> Stream for Results<S, E> {
+    type Item = Result<S::Item, S::Error>;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<Result<S::Item, S::Error>>, E> {
+        match self.inner.poll() {
+            Ok(Async::Ready(Some(item))) => Ok(Async::Ready(Some(Ok(item)))),
+            Err(e) => Ok(Async::Ready(Some(Err(e)))),
+            Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+            Ok(Async::NotReady) => Ok(Async::NotReady)
+        }
+    }
+}
+
diff --git a/rustc_deps/vendor/futures/src/sink/buffer.rs b/rustc_deps/vendor/futures/src/sink/buffer.rs
new file mode 100644
index 0000000..419579d
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/buffer.rs
@@ -0,0 +1,108 @@
+use std::collections::VecDeque;
+
+use {Poll, Async};
+use {StartSend, AsyncSink};
+use sink::Sink;
+use stream::Stream;
+
+/// Sink for the `Sink::buffer` combinator, which buffers up to some fixed
+/// number of values when the underlying sink is unable to accept them.
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct Buffer<S: Sink> {
+    sink: S,
+    buf: VecDeque<S::SinkItem>,
+
+    // Track capacity separately from the `VecDeque`, which may be rounded up
+    cap: usize,
+}
+
+pub fn new<S: Sink>(sink: S, amt: usize) -> Buffer<S> {
+    Buffer {
+        sink: sink,
+        buf: VecDeque::with_capacity(amt),
+        cap: amt,
+    }
+}
+
+impl<S: Sink> Buffer<S> {
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+
+    fn try_empty_buffer(&mut self) -> Poll<(), S::SinkError> {
+        while let Some(item) = self.buf.pop_front() {
+            if let AsyncSink::NotReady(item) = self.sink.start_send(item)? {
+                self.buf.push_front(item);
+
+                return Ok(Async::NotReady);
+            }
+        }
+
+        Ok(Async::Ready(()))
+    }
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S> Stream for Buffer<S> where S: Sink + Stream {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.sink.poll()
+    }
+}
+
+impl<S: Sink> Sink for Buffer<S> {
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+        if self.cap == 0 {
+            return self.sink.start_send(item);
+        }
+
+        self.try_empty_buffer()?;
+        if self.buf.len() == self.cap {
+            return Ok(AsyncSink::NotReady(item));
+        }
+        self.buf.push_back(item);
+        Ok(AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        if self.cap == 0 {
+            return self.sink.poll_complete();
+        }
+
+        try_ready!(self.try_empty_buffer());
+        debug_assert!(self.buf.is_empty());
+        self.sink.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        if self.cap == 0 {
+            return self.sink.close();
+        }
+
+        if self.buf.len() > 0 {
+            try_ready!(self.try_empty_buffer());
+        }
+        assert_eq!(self.buf.len(), 0);
+        self.sink.close()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/fanout.rs b/rustc_deps/vendor/futures/src/sink/fanout.rs
new file mode 100644
index 0000000..8d2456e
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/fanout.rs
@@ -0,0 +1,135 @@
+use core::fmt::{Debug, Formatter, Result as FmtResult};
+use core::mem::replace;
+
+use {Async, AsyncSink, Poll, Sink, StartSend};
+
+/// Sink that clones incoming items and forwards them to two sinks at the same time.
+///
+/// Backpressure from any downstream sink propagates up, which means that this sink
+/// can only process items as fast as its _slowest_ downstream sink.
+pub struct Fanout<A: Sink, B: Sink> {
+    left: Downstream<A>,
+    right: Downstream<B>
+}
+
+impl<A: Sink, B: Sink> Fanout<A, B> {
+    /// Consumes this combinator, returning the underlying sinks.
+    ///
+    /// Note that this may discard intermediate state of this combinator,
+    /// so care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> (A, B) {
+        (self.left.sink, self.right.sink)
+    }
+}
+
+impl<A: Sink + Debug, B: Sink + Debug> Debug for Fanout<A, B>
+    where A::SinkItem: Debug,
+          B::SinkItem: Debug
+{
+    fn fmt(&self, f: &mut Formatter) -> FmtResult {
+        f.debug_struct("Fanout")
+            .field("left", &self.left)
+            .field("right", &self.right)
+            .finish()
+    }
+}
+
+pub fn new<A: Sink, B: Sink>(left: A, right: B) -> Fanout<A, B> {
+    Fanout {
+        left: Downstream::new(left),
+        right: Downstream::new(right)
+    }
+}
+
+impl<A, B> Sink for Fanout<A, B>
+    where A: Sink,
+          A::SinkItem: Clone,
+          B: Sink<SinkItem=A::SinkItem, SinkError=A::SinkError>
+{
+    type SinkItem = A::SinkItem;
+    type SinkError = A::SinkError;
+
+    fn start_send(
+        &mut self, 
+        item: Self::SinkItem
+    ) -> StartSend<Self::SinkItem, Self::SinkError> {
+        // Attempt to complete processing any outstanding requests.
+        self.left.keep_flushing()?;
+        self.right.keep_flushing()?;
+        // Only if both downstream sinks are ready, start sending the next item.
+        if self.left.is_ready() && self.right.is_ready() {
+            self.left.state = self.left.sink.start_send(item.clone())?;
+            self.right.state = self.right.sink.start_send(item)?;
+            Ok(AsyncSink::Ready)
+        } else {
+            Ok(AsyncSink::NotReady(item))
+        }
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        let left_async = self.left.poll_complete()?;
+        let right_async = self.right.poll_complete()?;
+        // Only if both downstream sinks are ready, signal readiness.
+        if left_async.is_ready() && right_async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        let left_async = self.left.close()?;
+        let right_async = self.right.close()?;
+        // Only if both downstream sinks are ready, signal readiness.
+        if left_async.is_ready() && right_async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        } 
+    }
+}
+
+#[derive(Debug)]
+struct Downstream<S: Sink> {
+    sink: S,
+    state: AsyncSink<S::SinkItem>
+}
+
+impl<S: Sink> Downstream<S> {
+    fn new(sink: S) -> Self {
+        Downstream { sink: sink, state: AsyncSink::Ready }
+    }
+
+    fn is_ready(&self) -> bool {
+        self.state.is_ready()
+    }
+
+    fn keep_flushing(&mut self) -> Result<(), S::SinkError> {
+        if let AsyncSink::NotReady(item) = replace(&mut self.state, AsyncSink::Ready) {
+            self.state = self.sink.start_send(item)?;
+        }
+        Ok(())
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.keep_flushing()?;
+        let async = self.sink.poll_complete()?;
+        // Only if all values have been sent _and_ the underlying
+        // sink is completely flushed, signal readiness.
+        if self.state.is_ready() && async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.keep_flushing()?;
+        // If all items have been flushed, initiate close.
+        if self.state.is_ready() {
+            self.sink.close()
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/flush.rs b/rustc_deps/vendor/futures/src/sink/flush.rs
new file mode 100644
index 0000000..f66811e
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/flush.rs
@@ -0,0 +1,46 @@
+use {Poll, Async, Future};
+use sink::Sink;
+
+/// Future for the `Sink::flush` combinator, which polls the sink until all data
+/// has been flushed.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Flush<S> {
+    sink: Option<S>,
+}
+
+pub fn new<S: Sink>(sink: S) -> Flush<S> {
+    Flush { sink: Some(sink) }
+}
+
+impl<S: Sink> Flush<S> {
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        self.sink.as_ref().expect("Attempted `Flush::get_ref` after the flush completed")
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.sink.as_mut().expect("Attempted `Flush::get_mut` after the flush completed")
+    }
+
+    /// Consume the `Flush` and return the inner sink.
+    pub fn into_inner(self) -> S {
+        self.sink.expect("Attempted `Flush::into_inner` after the flush completed")
+    }
+}
+
+impl<S: Sink> Future for Flush<S> {
+    type Item = S;
+    type Error = S::SinkError;
+
+    fn poll(&mut self) -> Poll<S, S::SinkError> {
+        let mut sink = self.sink.take().expect("Attempted to poll Flush after it completed");
+        if sink.poll_complete()?.is_ready() {
+            Ok(Async::Ready(sink))
+        } else {
+            self.sink = Some(sink);
+            Ok(Async::NotReady)
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/from_err.rs b/rustc_deps/vendor/futures/src/sink/from_err.rs
new file mode 100644
index 0000000..4880c30
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/from_err.rs
@@ -0,0 +1,71 @@
+use core::marker::PhantomData;
+
+use {Sink, Poll, StartSend};
+
+/// A sink combinator to change the error type of a sink.
+///
+/// This is created by the `Sink::from_err` method.
+#[derive(Clone, Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SinkFromErr<S, E> {
+    sink: S,
+    f: PhantomData<E>
+}
+
+pub fn new<S, E>(sink: S) -> SinkFromErr<S, E>
+    where S: Sink
+{
+    SinkFromErr {
+        sink: sink,
+        f: PhantomData
+    }
+}
+
+impl<S, E> SinkFromErr<S, E> {
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+}
+
+impl<S, E> Sink for SinkFromErr<S, E>
+    where S: Sink,
+          E: From<S::SinkError>
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = E;
+
+    fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+        self.sink.start_send(item).map_err(|e| e.into())
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        self.sink.poll_complete().map_err(|e| e.into())
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        self.sink.close().map_err(|e| e.into())
+    }
+}
+
+impl<S: ::stream::Stream, E> ::stream::Stream for SinkFromErr<S, E> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.sink.poll()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/map_err.rs b/rustc_deps/vendor/futures/src/sink/map_err.rs
new file mode 100644
index 0000000..25c168c
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/map_err.rs
@@ -0,0 +1,64 @@
+use sink::Sink;
+
+use {Poll, StartSend, Stream};
+
+/// Sink for the `Sink::sink_map_err` combinator.
+#[derive(Clone,Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct SinkMapErr<S, F> {
+    sink: S,
+    f: Option<F>,
+}
+
+pub fn new<S, F>(s: S, f: F) -> SinkMapErr<S, F> {
+    SinkMapErr { sink: s, f: Some(f) }
+}
+
+impl<S, E> SinkMapErr<S, E> {
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+}
+
+impl<S, F, E> Sink for SinkMapErr<S, F>
+    where S: Sink,
+          F: FnOnce(S::SinkError) -> E,
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = E;
+
+    fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+        self.sink.start_send(item).map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        self.sink.poll_complete().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        self.sink.close().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
+    }
+}
+
+impl<S: Stream, F> Stream for SinkMapErr<S, F> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.sink.poll()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/mod.rs b/rustc_deps/vendor/futures/src/sink/mod.rs
new file mode 100644
index 0000000..e5ea97f
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/mod.rs
@@ -0,0 +1,489 @@
+//! Asynchronous sinks
+//!
+//! This module contains the `Sink` trait, along with a number of adapter types
+//! for it. An overview is available in the documentation for the trait itself.
+//!
+//! You can find more information/tutorials about streams and sinks [online at
+//! https://tokio.rs][online].
+//!
+//! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
+
+use {IntoFuture, Poll, StartSend};
+use stream::Stream;
+
+mod with;
+mod with_flat_map;
+// mod with_map;
+// mod with_filter;
+// mod with_filter_map;
+mod flush;
+mod from_err;
+mod send;
+mod send_all;
+mod map_err;
+mod fanout;
+
+if_std! {
+    mod buffer;
+    mod wait;
+
+    pub use self::buffer::Buffer;
+    pub use self::wait::Wait;
+
+    // TODO: consider expanding this via e.g. FromIterator
+    impl<T> Sink for ::std::vec::Vec<T> {
+        type SinkItem = T;
+        type SinkError = (); // Change this to ! once it stabilizes
+
+        fn start_send(&mut self, item: Self::SinkItem)
+                      -> StartSend<Self::SinkItem, Self::SinkError>
+        {
+            self.push(item);
+            Ok(::AsyncSink::Ready)
+        }
+
+        fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+            Ok(::Async::Ready(()))
+        }
+
+        fn close(&mut self) -> Poll<(), Self::SinkError> {
+            Ok(::Async::Ready(()))
+        }
+    }
+
+    /// A type alias for `Box<Sink + Send>`
+    pub type BoxSink<T, E> = ::std::boxed::Box<Sink<SinkItem = T, SinkError = E> +
+                                               ::core::marker::Send>;
+
+    impl<S: ?Sized + Sink> Sink for ::std::boxed::Box<S> {
+        type SinkItem = S::SinkItem;
+        type SinkError = S::SinkError;
+
+        fn start_send(&mut self, item: Self::SinkItem)
+                      -> StartSend<Self::SinkItem, Self::SinkError> {
+            (**self).start_send(item)
+        }
+
+        fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+            (**self).poll_complete()
+        }
+
+        fn close(&mut self) -> Poll<(), Self::SinkError> {
+            (**self).close()
+        }
+    }
+}
+
+pub use self::with::With;
+pub use self::with_flat_map::WithFlatMap;
+pub use self::flush::Flush;
+pub use self::send::Send;
+pub use self::send_all::SendAll;
+pub use self::map_err::SinkMapErr;
+pub use self::from_err::SinkFromErr;
+pub use self::fanout::Fanout;
+
+/// A `Sink` is a value into which other values can be sent, asynchronously.
+///
+/// Basic examples of sinks include the sending side of:
+///
+/// - Channels
+/// - Sockets
+/// - Pipes
+///
+/// In addition to such "primitive" sinks, it's typical to layer additional
+/// functionality, such as buffering, on top of an existing sink.
+///
+/// Sending to a sink is "asynchronous" in the sense that the value may not be
+/// sent in its entirety immediately. Instead, values are sent in a two-phase
+/// way: first by initiating a send, and then by polling for completion. This
+/// two-phase setup is analogous to buffered writing in synchronous code, where
+/// writes often succeed immediately, but internally are buffered and are
+/// *actually* written only upon flushing.
+///
+/// In addition, the `Sink` may be *full*, in which case it is not even possible
+/// to start the sending process.
+///
+/// As with `Future` and `Stream`, the `Sink` trait is built from a few core
+/// required methods, and a host of default methods for working in a
+/// higher-level way. The `Sink::send_all` combinator is of particular
+/// importance: you can use it to send an entire stream to a sink, which is
+/// the simplest way to ultimately consume a sink.
+///
+/// You can find more information/tutorials about streams and sinks [online at
+/// https://tokio.rs][online].
+///
+/// [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
+pub trait Sink {
+    /// The type of value that the sink accepts.
+    type SinkItem;
+
+    /// The type of value produced by the sink when an error occurs.
+    type SinkError;
+
+    /// Begin the process of sending a value to the sink.
+    ///
+    /// As the name suggests, this method only *begins* the process of sending
+    /// the item. If the sink employs buffering, the item isn't fully processed
+    /// until the buffer is fully flushed. Since sinks are designed to work with
+    /// asynchronous I/O, the process of actually writing out the data to an
+    /// underlying object takes place asynchronously. **You *must* use
+    /// `poll_complete` in order to drive completion of a send**. In particular,
+    /// `start_send` does not begin the flushing process.
+    ///
+    /// # Return value
+    ///
+    /// This method returns `AsyncSink::Ready` if the sink was able to start
+    /// sending `item`. In that case, you *must* ensure that you call
+    /// `poll_complete` to process the sent item to completion. Note, however,
+    /// that several calls to `start_send` can be made prior to calling
+    /// `poll_complete`, which will work on completing all pending items.
+    ///
+    /// The method returns `AsyncSink::NotReady` if the sink was unable to begin
+    /// sending, usually due to being full. The sink must have attempted to
+    /// complete processing any outstanding requests (equivalent to
+    /// `poll_complete`) before yielding this result. The current task will be
+    /// automatically scheduled for notification when the sink may be ready to
+    /// receive new values.
+    ///
+    /// # Errors
+    ///
+    /// If the sink encounters an error other than being temporarily full, it
+    /// uses the `Err` variant to signal that error. In most cases, such errors
+    /// mean that the sink will permanently be unable to receive items.
+    ///
+    /// # Panics
+    ///
+    /// This method may panic in a few situations, depending on the specific
+    /// sink:
+    ///
+    /// - It is called outside of the context of a task.
+    /// - A previous call to `start_send` or `poll_complete` yielded an error.
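+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of driving a send by hand, using the `Vec<T>` sink
+    /// implementation provided by this crate (which is always ready):
+    ///
+    /// ```
+    /// use futures::{Async, AsyncSink, Sink};
+    ///
+    /// let mut sink: Vec<i32> = Vec::new();
+    /// // Hand the item to the sink, then drive the (here trivial) flush.
+    /// assert_eq!(sink.start_send(1), Ok(AsyncSink::Ready));
+    /// assert_eq!(sink.poll_complete(), Ok(Async::Ready(())));
+    /// assert_eq!(sink, vec![1]);
+    /// ```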
+    fn start_send(&mut self, item: Self::SinkItem)
+                  -> StartSend<Self::SinkItem, Self::SinkError>;
+
+    /// Flush all output from this sink, if necessary.
+    ///
+    /// Some sinks may buffer intermediate data as an optimization to improve
+    /// throughput. In other words, if a sink has a corresponding receiver then
+    /// a successful `start_send` above may not guarantee that the value is
+    /// actually ready to be received by the receiver. This function is intended
+    /// to be used to ensure that values do indeed make their way to the
+    /// receiver.
+    ///
+    /// This function will attempt to process any pending requests on behalf of
+    /// the sink and drive it to completion.
+    ///
+    /// # Return value
+    ///
+    /// Returns `Ok(Async::Ready(()))` when no buffered items remain. If this
+    /// value is returned then it is guaranteed that all values previously sent
+    /// via `start_send` are available to a listening receiver.
+    ///
+    /// Returns `Ok(Async::NotReady)` if there is more work left to do, in which
+    /// case the current task is scheduled to wake up when more progress may be
+    /// possible.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if the sink encounters an error while processing one of
+    /// its pending requests. Due to the buffered nature of requests, it is not
+    /// generally possible to correlate the error with a particular request. As
+    /// with `start_send`, these errors are generally "fatal" for continued use
+    /// of the sink.
+    ///
+    /// # Panics
+    ///
+    /// This method may panic in a few situations, depending on the specific sink:
+    ///
+    /// - It is called outside of the context of a task.
+    /// - A previous call to `start_send` or `poll_complete` yielded an error.
+    ///
+    /// # Compatibility notes
+    ///
+    /// The name of this method may be slightly misleading as the original
+    /// intention was to have this method be more general than just flushing
+    /// requests. Over time though it was decided to trim back the ambitions of
+    /// this method to what it's always done, just flushing.
+    ///
+    /// In the 0.2 release series of futures this method will be renamed to
+    /// `poll_flush`. For 0.1, however, the breaking change is not happening
+    /// yet.
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError>;
+
+    /// A method to indicate that no more values will ever be pushed into this
+    /// sink.
+    ///
+    /// This method is used to indicate that a sink will no longer be given
+    /// another value by the caller. That is, the `start_send` method above (and
+    /// `poll_complete`) will no longer be called. This method is intended to
+    /// model "graceful shutdown" in various protocols where the intent to shut
+    /// down is followed by a little more blocking work.
+    ///
+    /// Callers of this function should treat it in a similar fashion to
+    /// `poll_complete`. Once called it may return `NotReady`, which indicates
+    /// that more external work needs to happen to make progress. The current
+    /// task will be scheduled to receive a notification in such an event,
+    /// however.
+    ///
+    /// Note that this function will imply `poll_complete` above. That is, if a
+    /// sink has buffered data, then it'll be flushed out during a `close`
+    /// operation. It is not necessary to have `poll_complete` return `Ready`
+    /// before a `close` is called. Once a `close` is called, though,
+    /// `poll_complete` cannot be called.
+    ///
+    /// # Return value
+    ///
+    /// This function, like `poll_complete`, returns a `Poll`. The value is
+    /// `Ready` once the close operation has completed. At that point it should
+    /// be safe to drop the sink and deallocate associated resources.
+    ///
+    /// If the value returned is `NotReady` then the sink is not yet closed and
+    /// work needs to be done to close it. The work has been scheduled and the
+    /// current task will receive a notification when it's next ready to call
+    /// this method again.
+    ///
+    /// Finally, this function may also return an error.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an `Err` if any operation along the way during
+    /// the close operation fails. An error typically is fatal for a sink and is
+    /// unable to be recovered from, but in specific situations this may not
+    /// always be true.
+    ///
+    /// Note that it's also typically an error to call `start_send` or
+    /// `poll_complete` after the `close` function is called. This method will
+    /// *initiate* a close, and continuing to send values after that (or trying
+    /// to flush) may result in strange behavior, panics, errors, etc. Once this
+    /// method is called, it must be the only method called on this `Sink`.
+    ///
+    /// # Panics
+    ///
+    /// This method may panic or cause panics if:
+    ///
+    /// * It is called outside the context of a future's task
+    /// * It is called and then `start_send` or `poll_complete` is called
+    ///
+    /// # Compatibility notes
+    ///
+    /// Note that this function is currently by default a provided function,
+    /// defaulted to calling `poll_complete` above. This function was added
+    /// in the 0.1 series of the crate as a backwards-compatible addition. It
+    /// is intended that in the 0.2 series the method will no longer be a
+    /// default method.
+    ///
+    /// It is highly recommended to consider this method a required method and
+    /// to implement it whenever you implement `Sink` locally. It is especially
+    /// crucial to be sure to close inner sinks, if applicable.
+    #[cfg(feature = "with-deprecated")]
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        self.poll_complete()
+    }
+
+    /// See the documentation for `close` above; this declaration is used when
+    /// the `with-deprecated` feature is disabled.
+    #[cfg(not(feature = "with-deprecated"))]
+    fn close(&mut self) -> Poll<(), Self::SinkError>;
+
+    /// Creates a new object which will produce a synchronous sink.
+    ///
+    /// The sink returned does **not** implement the `Sink` trait, and instead
+    /// only has blocking methods: `send`, `flush`, and `close`. The first two
+    /// correspond to `start_send` and `poll_complete` above, except that they
+    /// are executed in a blocking fashion.
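+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch using the `Vec<T>` sink implementation provided by this
+    /// crate:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    ///
+    /// let sink: Vec<i32> = Vec::new();
+    /// let mut blocking = sink.wait();
+    /// // These calls block (trivially, for a `Vec`) instead of returning futures.
+    /// blocking.send(3).unwrap();
+    /// blocking.flush().unwrap();
+    /// ```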
+    #[cfg(feature = "use_std")]
+    fn wait(self) -> Wait<Self>
+        where Self: Sized
+    {
+        wait::new(self)
+    }
+
+    /// Composes a function *in front of* the sink.
+    ///
+    /// This adapter produces a new sink that passes each value through the
+    /// given function `f` before sending it to `self`.
+    ///
+    /// To process each value, `f` produces a *future*, which is then polled to
+    /// completion before passing its result down to the underlying sink. If the
+    /// future produces an error, that error is returned by the new sink.
+    ///
+    /// Note that this function consumes the given sink, returning a wrapped
+    /// version, much like `Iterator::map`.
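+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch using an `mpsc` channel; the doubling step is purely
+    /// illustrative.
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (tx, rx) = mpsc::channel::<i32>(5);
+    ///
+    /// // Double every value before it is handed to the underlying sink.
+    /// let tx = tx.with(|x: i32| future::ok::<i32, mpsc::SendError<i32>>(x * 2));
+    /// tx.send(21).wait().unwrap();
+    /// assert_eq!(rx.collect().wait(), Ok(vec![42]));
+    /// ```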
+    fn with<U, F, Fut>(self, f: F) -> With<Self, U, F, Fut>
+        where F: FnMut(U) -> Fut,
+              Fut: IntoFuture<Item = Self::SinkItem>,
+              Fut::Error: From<Self::SinkError>,
+              Self: Sized
+    {
+        with::new(self, f)
+    }
+
+    /// Composes a function *in front of* the sink.
+    ///
+    /// This adapter produces a new sink that passes each value through the
+    /// given function `f` before sending it to `self`.
+    ///
+    /// To process each value, `f` produces a *stream*, each value of which is
+    /// passed to the underlying sink. A new value will not be accepted until
+    /// that stream has been drained.
+    ///
+    /// Note that this function consumes the given sink, returning a wrapped
+    /// version, much like `Iterator::flat_map`.
+    ///
+    /// # Examples
+    ///
+    /// Using this function with an iterator, via the `stream::iter_ok()`
+    /// function:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (tx, rx) = mpsc::channel::<i32>(5);
+    ///
+    /// let tx = tx.with_flat_map(|x| {
+    ///     stream::iter_ok(vec![42; x].into_iter().map(|y| y))
+    /// });
+    /// tx.send(5).wait().unwrap();
+    /// assert_eq!(rx.collect().wait(), Ok(vec![42, 42, 42, 42, 42]))
+    /// ```
+    fn with_flat_map<U, F, St>(self, f: F) -> WithFlatMap<Self, U, F, St>
+        where F: FnMut(U) -> St,
+              St: Stream<Item = Self::SinkItem, Error=Self::SinkError>,
+              Self: Sized
+        {
+            with_flat_map::new(self, f)
+        }
+
+    /*
+    fn with_map<U, F>(self, f: F) -> WithMap<Self, U, F>
+        where F: FnMut(U) -> Self::SinkItem,
+              Self: Sized;
+
+    fn with_filter<F>(self, f: F) -> WithFilter<Self, F>
+        where F: FnMut(Self::SinkItem) -> bool,
+              Self: Sized;
+
+    fn with_filter_map<U, F>(self, f: F) -> WithFilterMap<Self, U, F>
+        where F: FnMut(U) -> Option<Self::SinkItem>,
+              Self: Sized;
+     */
+
+    /// Transforms the error returned by the sink.
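+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the string conversion is purely illustrative.
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (tx, rx) = mpsc::channel::<i32>(5);
+    ///
+    /// // Any `SendError` produced by the channel would be turned into a `String`.
+    /// let tx = tx.sink_map_err(|e| format!("send failed: {}", e));
+    /// tx.send(1).wait().unwrap();
+    /// assert_eq!(rx.collect().wait(), Ok(vec![1]));
+    /// ```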
+    fn sink_map_err<F, E>(self, f: F) -> SinkMapErr<Self, F>
+        where F: FnOnce(Self::SinkError) -> E,
+              Self: Sized,
+    {
+        map_err::new(self, f)
+    }
+
+    /// Map this sink's error to any error implementing `From` for this sink's
+    /// `Error`, returning a new sink.
+    ///
+    /// If you want to map the errors of a `Sink + Stream`, use
+    /// `.sink_from_err().from_err()`.
+    fn sink_from_err<E: From<Self::SinkError>>(self) -> from_err::SinkFromErr<Self, E>
+        where Self: Sized,
+    {
+        from_err::new(self)
+    }
+
+
+    /// Adds a fixed-size buffer to the current sink.
+    ///
+    /// The resulting sink will buffer up to `amt` items when the underlying
+    /// sink is unwilling to accept additional items. Calling `poll_complete` on
+    /// the buffered sink will attempt to both empty the buffer and complete
+    /// processing on the underlying sink.
+    ///
+    /// Note that this function consumes the given sink, returning a wrapped
+    /// version, much like `Iterator::map`.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
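+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch wrapping a channel sender; the capacities chosen here
+    /// are arbitrary.
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (tx, rx) = mpsc::channel::<i32>(1);
+    ///
+    /// // Hold up to 4 additional items locally while the channel itself is full.
+    /// let tx = tx.buffer(4);
+    /// tx.send(1).wait().unwrap();
+    /// assert_eq!(rx.collect().wait(), Ok(vec![1]));
+    /// ```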
+    #[cfg(feature = "use_std")]
+    fn buffer(self, amt: usize) -> Buffer<Self>
+        where Self: Sized
+    {
+        buffer::new(self, amt)
+    }
+
+    /// Fanout items to multiple sinks.
+    ///
+    /// This adapter clones each incoming item and forwards it to both this sink
+    /// and the other sink at the same time.
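+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch using two `Vec<T>` sinks (an impl provided by this
+    /// crate):
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    ///
+    /// let left: Vec<i32> = Vec::new();
+    /// let right: Vec<i32> = Vec::new();
+    ///
+    /// // Every item sent to `both` is cloned into `left` and `right`.
+    /// let both = left.fanout(right);
+    /// let both = both.send(7).wait().unwrap();
+    /// let (left, right) = both.into_inner();
+    /// assert_eq!(left, vec![7]);
+    /// assert_eq!(right, vec![7]);
+    /// ```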
+    fn fanout<S>(self, other: S) -> Fanout<Self, S>
+        where Self: Sized,
+              Self::SinkItem: Clone,
+              S: Sink<SinkItem=Self::SinkItem, SinkError=Self::SinkError>
+    {
+        fanout::new(self, other)
+    }
+
+    /// A future that completes when the sink has finished processing all
+    /// pending requests.
+    ///
+    /// The sink itself is returned after flushing is complete; this adapter is
+    /// intended to be used when you want to stop sending to the sink until
+    /// all current requests are processed.
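+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; `Vec<T>` is always flushed, so the future below
+    /// resolves immediately.
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    ///
+    /// let sink: Vec<i32> = Vec::new();
+    /// let sink = sink.send(1).wait().unwrap();
+    /// let sink = sink.flush().wait().unwrap();
+    /// assert_eq!(sink, vec![1]);
+    /// ```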
+    fn flush(self) -> Flush<Self>
+        where Self: Sized
+    {
+        flush::new(self)
+    }
+
+    /// A future that completes after the given item has been fully processed
+    /// into the sink, including flushing.
+    ///
+    /// Note that, **because of the flushing requirement, it is usually better
+    /// to batch together items to send via `send_all`, rather than flushing
+    /// between each item.**
+    ///
+    /// On completion, the sink is returned.
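+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch using the `Vec<T>` sink implementation provided by this
+    /// crate:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    ///
+    /// let sink: Vec<i32> = Vec::new();
+    /// // `send` resolves to the sink itself once the item is accepted and flushed.
+    /// let sink = sink.send(5).wait().unwrap();
+    /// assert_eq!(sink, vec![5]);
+    /// ```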
+    fn send(self, item: Self::SinkItem) -> Send<Self>
+        where Self: Sized
+    {
+        send::new(self, item)
+    }
+
+    /// A future that completes after the given stream has been fully processed
+    /// into the sink, including flushing.
+    ///
+    /// This future will drive the stream to keep producing items until it is
+    /// exhausted, sending each item to the sink. It will complete once both the
+    /// stream is exhausted, the sink has received all items, the sink has been
+    /// flushed, and the sink has been closed.
+    ///
+    /// Doing `sink.send_all(stream)` is roughly equivalent to
+    /// `stream.forward(sink)`. The returned future will exhaust all items from
+    /// `stream` and send them to `self`, closing `self` when all items have been
+    /// received.
+    ///
+    /// On completion, the pair `(sink, source)` is returned.
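+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch feeding a fixed stream into a `Vec<T>` sink:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let sink: Vec<i32> = Vec::new();
+    /// let source = stream::iter_ok::<_, ()>(vec![1, 2, 3]);
+    ///
+    /// // The future resolves to `(sink, source)` once everything has been sent.
+    /// let (sink, _source) = sink.send_all(source).wait().unwrap();
+    /// assert_eq!(sink, vec![1, 2, 3]);
+    /// ```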
+    fn send_all<S>(self, stream: S) -> SendAll<Self, S>
+        where S: Stream<Item = Self::SinkItem>,
+              Self::SinkError: From<S::Error>,
+              Self: Sized
+    {
+        send_all::new(self, stream)
+    }
+}
+
+impl<'a, S: ?Sized + Sink> Sink for &'a mut S {
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: Self::SinkItem)
+                  -> StartSend<Self::SinkItem, Self::SinkError> {
+        (**self).start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        (**self).poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        (**self).close()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/send.rs b/rustc_deps/vendor/futures/src/sink/send.rs
new file mode 100644
index 0000000..71173fa
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/send.rs
@@ -0,0 +1,59 @@
+use {Poll, Async, Future, AsyncSink};
+use sink::Sink;
+
+/// Future for the `Sink::send` combinator, which sends a value to a sink and
+/// then waits until the sink has fully flushed.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Send<S: Sink> {
+    sink: Option<S>,
+    item: Option<S::SinkItem>,
+}
+
+pub fn new<S: Sink>(sink: S, item: S::SinkItem) -> Send<S> {
+    Send {
+        sink: Some(sink),
+        item: Some(item),
+    }
+}
+
+impl<S: Sink> Send<S> {
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        self.sink.as_ref().take().expect("Attempted Send::get_ref after completion")
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.sink.as_mut().take().expect("Attempted Send::get_mut after completion")
+    }
+
+    fn sink_mut(&mut self) -> &mut S {
+        self.sink.as_mut().take().expect("Attempted to poll Send after completion")
+    }
+
+    fn take_sink(&mut self) -> S {
+        self.sink.take().expect("Attempted to poll Send after completion")
+    }
+}
+
+impl<S: Sink> Future for Send<S> {
+    type Item = S;
+    type Error = S::SinkError;
+
+    fn poll(&mut self) -> Poll<S, S::SinkError> {
+        if let Some(item) = self.item.take() {
+            if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? {
+                self.item = Some(item);
+                return Ok(Async::NotReady);
+            }
+        }
+
+        // we're done sending the item, but want to block on flushing the
+        // sink
+        try_ready!(self.sink_mut().poll_complete());
+
+        // now everything's emptied, so return the sink for further use
+        Ok(Async::Ready(self.take_sink()))
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/send_all.rs b/rustc_deps/vendor/futures/src/sink/send_all.rs
new file mode 100644
index 0000000..a230903
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/send_all.rs
@@ -0,0 +1,88 @@
+use {Poll, Async, Future, AsyncSink};
+use stream::{Stream, Fuse};
+use sink::Sink;
+
+/// Future for the `Sink::send_all` combinator, which sends a stream of values
+/// to a sink and then waits until the sink has fully flushed those values.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SendAll<T, U: Stream> {
+    sink: Option<T>,
+    stream: Option<Fuse<U>>,
+    buffered: Option<U::Item>,
+}
+
+pub fn new<T, U>(sink: T, stream: U) -> SendAll<T, U>
+    where T: Sink,
+          U: Stream<Item = T::SinkItem>,
+          T::SinkError: From<U::Error>,
+{
+    SendAll {
+        sink: Some(sink),
+        stream: Some(stream.fuse()),
+        buffered: None,
+    }
+}
+
+impl<T, U> SendAll<T, U>
+    where T: Sink,
+          U: Stream<Item = T::SinkItem>,
+          T::SinkError: From<U::Error>,
+{
+    fn sink_mut(&mut self) -> &mut T {
+        self.sink.as_mut().take().expect("Attempted to poll SendAll after completion")
+    }
+
+    fn stream_mut(&mut self) -> &mut Fuse<U> {
+        self.stream.as_mut()
+            .expect("Attempted to poll SendAll after completion")
+    }
+
+    fn take_result(&mut self) -> (T, U) {
+        let sink = self.sink.take()
+            .expect("Attempted to poll SendAll after completion");
+        let fuse = self.stream.take()
+            .expect("Attempted to poll SendAll after completion");
+        (sink, fuse.into_inner())
+    }
+
+    fn try_start_send(&mut self, item: U::Item) -> Poll<(), T::SinkError> {
+        debug_assert!(self.buffered.is_none());
+        if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? {
+            self.buffered = Some(item);
+            return Ok(Async::NotReady)
+        }
+        Ok(Async::Ready(()))
+    }
+}
+
+impl<T, U> Future for SendAll<T, U>
+    where T: Sink,
+          U: Stream<Item = T::SinkItem>,
+          T::SinkError: From<U::Error>,
+{
+    type Item = (T, U);
+    type Error = T::SinkError;
+
+    fn poll(&mut self) -> Poll<(T, U), T::SinkError> {
+        // If we've got an item buffered already, we need to write it to the
+        // sink before we can do anything else
+        if let Some(item) = self.buffered.take() {
+            try_ready!(self.try_start_send(item))
+        }
+
+        loop {
+            match self.stream_mut().poll()? {
+                Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
+                Async::Ready(None) => {
+                    try_ready!(self.sink_mut().close());
+                    return Ok(Async::Ready(self.take_result()))
+                }
+                Async::NotReady => {
+                    try_ready!(self.sink_mut().poll_complete());
+                    return Ok(Async::NotReady)
+                }
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/wait.rs b/rustc_deps/vendor/futures/src/sink/wait.rs
new file mode 100644
index 0000000..940a588
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/wait.rs
@@ -0,0 +1,59 @@
+use sink::Sink;
+use executor;
+
+/// A sink combinator which converts an asynchronous sink to a **blocking
+/// sink**.
+///
+/// Created by the `Sink::wait` method, this type transforms any sink into a
+/// blocking version. This is implemented by blocking the current thread when a
+/// sink is otherwise unable to make progress.
+#[must_use = "sinks do nothing unless used"]
+#[derive(Debug)]
+pub struct Wait<S> {
+    sink: executor::Spawn<S>,
+}
+
+pub fn new<S: Sink>(s: S) -> Wait<S> {
+    Wait {
+        sink: executor::spawn(s),
+    }
+}
+
+impl<S: Sink> Wait<S> {
+    /// Sends a value to this sink, blocking the current thread until it's able
+    /// to do so.
+    ///
+    /// This function will take the `value` provided and call the underlying
+    /// sink's `start_send` function until it's ready to accept the value. If
+    /// the function returns `NotReady` then the current thread is blocked
+    /// until it is otherwise ready to accept the value.
+    ///
+    /// # Return value
+    ///
+    /// If `Ok(())` is returned then the `value` provided was successfully sent
+    /// along the sink, and if `Err(e)` is returned then an error occurred
+    /// which prevented the value from being sent.
+    pub fn send(&mut self, value: S::SinkItem) -> Result<(), S::SinkError> {
+        self.sink.wait_send(value)
+    }
+
+    /// Flushes any buffered data in this sink, blocking the current thread
+    /// until it's entirely flushed.
+    ///
+    /// This function will call the underlying sink's `poll_complete` method
+    /// until it returns that it's ready to proceed. If the method returns
+    /// `NotReady` the current thread will be blocked until it's otherwise
+    /// ready to proceed.
+    pub fn flush(&mut self) -> Result<(), S::SinkError> {
+        self.sink.wait_flush()
+    }
+
+    /// Close this sink, blocking the current thread until it's entirely closed.
+    ///
+    /// This function will call the underlying sink's `close` method
+    /// until it returns that it's closed. If the method returns
+    /// `NotReady` the current thread will be blocked until it's otherwise closed.
+    pub fn close(&mut self) -> Result<(), S::SinkError> {
+        self.sink.wait_close()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/with.rs b/rustc_deps/vendor/futures/src/sink/with.rs
new file mode 100644
index 0000000..3326b6e
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/with.rs
@@ -0,0 +1,153 @@
+use core::mem;
+use core::marker::PhantomData;
+
+use {IntoFuture, Future, Poll, Async, StartSend, AsyncSink};
+use sink::Sink;
+use stream::Stream;
+
+/// Sink for the `Sink::with` combinator, chaining a computation to run *prior*
+/// to pushing a value into the underlying sink.
+#[derive(Clone, Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct With<S, U, F, Fut>
+    where S: Sink,
+          F: FnMut(U) -> Fut,
+          Fut: IntoFuture,
+{
+    sink: S,
+    f: F,
+    state: State<Fut::Future, S::SinkItem>,
+    _phantom: PhantomData<fn(U)>,
+}
+
+#[derive(Clone, Debug)]
+enum State<Fut, T> {
+    Empty,
+    Process(Fut),
+    Buffered(T),
+}
+
+impl<Fut, T> State<Fut, T> {
+    fn is_empty(&self) -> bool {
+        if let State::Empty = *self {
+            true
+        } else {
+            false
+        }
+    }
+}
+
+pub fn new<S, U, F, Fut>(sink: S, f: F) -> With<S, U, F, Fut>
+    where S: Sink,
+          F: FnMut(U) -> Fut,
+          Fut: IntoFuture<Item = S::SinkItem>,
+          Fut::Error: From<S::SinkError>,
+{
+    With {
+        state: State::Empty,
+        sink: sink,
+        f: f,
+        _phantom: PhantomData,
+    }
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S, U, F, Fut> Stream for With<S, U, F, Fut>
+    where S: Stream + Sink,
+          F: FnMut(U) -> Fut,
+          Fut: IntoFuture
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.sink.poll()
+    }
+}
+
+impl<S, U, F, Fut> With<S, U, F, Fut>
+    where S: Sink,
+          F: FnMut(U) -> Fut,
+          Fut: IntoFuture<Item = S::SinkItem>,
+          Fut::Error: From<S::SinkError>,
+{
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+
+    fn poll(&mut self) -> Poll<(), Fut::Error> {
+        loop {
+            match mem::replace(&mut self.state, State::Empty) {
+                State::Empty => break,
+                State::Process(mut fut) => {
+                    match fut.poll()? {
+                        Async::Ready(item) => {
+                            self.state = State::Buffered(item);
+                        }
+                        Async::NotReady => {
+                            self.state = State::Process(fut);
+                            break
+                        }
+                    }
+                }
+                State::Buffered(item) => {
+                    if let AsyncSink::NotReady(item) = self.sink.start_send(item)? {
+                        self.state = State::Buffered(item);
+                        break
+                    }
+                }
+            }
+        }
+
+        if self.state.is_empty() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
+
+impl<S, U, F, Fut> Sink for With<S, U, F, Fut>
+    where S: Sink,
+          F: FnMut(U) -> Fut,
+          Fut: IntoFuture<Item = S::SinkItem>,
+          Fut::Error: From<S::SinkError>,
+{
+    type SinkItem = U;
+    type SinkError = Fut::Error;
+
+    fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Fut::Error> {
+        if self.poll()?.is_not_ready() {
+            return Ok(AsyncSink::NotReady(item))
+        }
+        self.state = State::Process((self.f)(item).into_future());
+        Ok(AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Fut::Error> {
+        // poll ourselves first, to push data downward
+        let me_ready = self.poll()?;
+        // always propagate `poll_complete` downward to attempt to make progress
+        try_ready!(self.sink.poll_complete());
+        Ok(me_ready)
+    }
+
+    fn close(&mut self) -> Poll<(), Fut::Error> {
+        try_ready!(self.poll());
+        Ok(self.sink.close()?)
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sink/with_flat_map.rs b/rustc_deps/vendor/futures/src/sink/with_flat_map.rs
new file mode 100644
index 0000000..80c4f66
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sink/with_flat_map.rs
@@ -0,0 +1,126 @@
+use core::marker::PhantomData;
+
+use {Poll, Async, StartSend, AsyncSink};
+use sink::Sink;
+use stream::Stream;
+
+/// Sink for the `Sink::with_flat_map` combinator, chaining a computation that returns an iterator
+/// to run prior to pushing a value into the underlying sink
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct WithFlatMap<S, U, F, St>
+where
+    S: Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    sink: S,
+    f: F,
+    stream: Option<St>,
+    buffer: Option<S::SinkItem>,
+    _phantom: PhantomData<fn(U)>,
+}
+
+pub fn new<S, U, F, St>(sink: S, f: F) -> WithFlatMap<S, U, F, St>
+where
+    S: Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    WithFlatMap {
+        sink: sink,
+        f: f,
+        stream: None,
+        buffer: None,
+        _phantom: PhantomData,
+    }
+}
+
+impl<S, U, F, St> WithFlatMap<S, U, F, St>
+where
+    S: Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+
+    fn try_empty_stream(&mut self) -> Poll<(), S::SinkError> {
+        if let Some(x) = self.buffer.take() {
+            if let AsyncSink::NotReady(x) = self.sink.start_send(x)? {
+                self.buffer = Some(x);
+                return Ok(Async::NotReady);
+            }
+        }
+        if let Some(mut stream) = self.stream.take() {
+            while let Some(x) = try_ready!(stream.poll()) {
+                if let AsyncSink::NotReady(x) = self.sink.start_send(x)? {
+                    self.stream = Some(stream);
+                    self.buffer = Some(x);
+                    return Ok(Async::NotReady);
+                }
+            }
+        }
+        Ok(Async::Ready(()))
+    }
+}
+
+impl<S, U, F, St> Stream for WithFlatMap<S, U, F, St>
+where
+    S: Stream + Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.sink.poll()
+    }
+}
+
+impl<S, U, F, St> Sink for WithFlatMap<S, U, F, St>
+where
+    S: Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    type SinkItem = U;
+    type SinkError = S::SinkError;
+    fn start_send(&mut self, i: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+        if self.try_empty_stream()?.is_not_ready() {
+            return Ok(AsyncSink::NotReady(i));
+        }
+        assert!(self.stream.is_none());
+        self.stream = Some((self.f)(i));
+        self.try_empty_stream()?;
+        Ok(AsyncSink::Ready)
+    }
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        if self.try_empty_stream()?.is_not_ready() {
+            return Ok(Async::NotReady);
+        }
+        self.sink.poll_complete()
+    }
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        if self.try_empty_stream()?.is_not_ready() {
+            return Ok(Async::NotReady);
+        }
+        assert!(self.stream.is_none());
+        self.sink.close()
+    }
+}
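
A corresponding sketch for `Sink::with_flat_map` (again illustration only; it assumes the crate's `stream::iter_ok`, `sync::mpsc`, and blocking `wait()` helpers):

extern crate futures;

use futures::{Future, Sink, Stream, stream};
use futures::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<i32>(8);
    // Every accepted value is expanded into a stream of items, all of which
    // are pushed into the channel before the next value is accepted.
    let tx = tx.with_flat_map(|n: i32| {
        stream::iter_ok::<_, mpsc::SendError<i32>>(vec![n, n * 10])
    });
    tx.send_all(stream::iter_ok::<_, mpsc::SendError<i32>>(vec![1, 2]))
        .wait()
        .unwrap();
    let got: Vec<i32> = rx.collect().wait().unwrap();
    assert_eq!(got, vec![1, 10, 2, 20]);
}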
diff --git a/rustc_deps/vendor/futures/src/stream/and_then.rs b/rustc_deps/vendor/futures/src/stream/and_then.rs
new file mode 100644
index 0000000..1fac8b9
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/and_then.rs
@@ -0,0 +1,106 @@
+use {IntoFuture, Future, Poll, Async};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto values produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::and_then` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct AndThen<S, F, U>
+    where U: IntoFuture,
+{
+    stream: S,
+    future: Option<U::Future>,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> AndThen<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Item) -> U,
+          U: IntoFuture<Error=S::Error>,
+{
+    AndThen {
+        stream: s,
+        future: None,
+        f: f,
+    }
+}
+
+impl<S, F, U> AndThen<S, F, U>
+    where U: IntoFuture,
+{
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F, U: IntoFuture> ::sink::Sink for AndThen<S, F, U>
+    where S: ::sink::Sink
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F, U> Stream for AndThen<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Item) -> U,
+          U: IntoFuture<Error=S::Error>,
+{
+    type Item = U::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<U::Item>, S::Error> {
+        if self.future.is_none() {
+            let item = match try_ready!(self.stream.poll()) {
+                None => return Ok(Async::Ready(None)),
+                Some(e) => e,
+            };
+            self.future = Some((self.f)(item).into_future());
+        }
+        assert!(self.future.is_some());
+        match self.future.as_mut().unwrap().poll() {
+            Ok(Async::Ready(e)) => {
+                self.future = None;
+                Ok(Async::Ready(Some(e)))
+            }
+            Err(e) => {
+                self.future = None;
+                Err(e)
+            }
+            Ok(Async::NotReady) => Ok(Async::NotReady)
+        }
+    }
+}
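
A quick sketch of `Stream::and_then` in use (not part of the vendored diff; assumes `stream::iter_ok`, `future::ok`, and the blocking `wait()` from elsewhere in the crate):

extern crate futures;

use futures::{Future, Stream, future, stream};

fn main() {
    // Each item is fed through a future-returning closure; the stream yields
    // the futures' results in order.
    let out = stream::iter_ok::<_, ()>(vec![1, 2, 3])
        .and_then(|n| future::ok::<_, ()>(n + 1))
        .collect()
        .wait();
    assert_eq!(out, Ok(vec![2, 3, 4]));
}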
diff --git a/rustc_deps/vendor/futures/src/stream/buffer_unordered.rs b/rustc_deps/vendor/futures/src/stream/buffer_unordered.rs
new file mode 100644
index 0000000..3011108
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/buffer_unordered.rs
@@ -0,0 +1,130 @@
+use std::fmt;
+
+use {Async, IntoFuture, Poll};
+use stream::{Stream, Fuse, FuturesUnordered};
+
+/// An adaptor for a stream of futures to execute the futures concurrently, if
+/// possible, delivering results as they become available.
+///
+/// This adaptor will buffer up a list of pending futures, and then return their
+/// results in the order that they complete. This is created by the
+/// `Stream::buffer_unordered` method.
+#[must_use = "streams do nothing unless polled"]
+pub struct BufferUnordered<S>
+    where S: Stream,
+          S::Item: IntoFuture,
+{
+    stream: Fuse<S>,
+    queue: FuturesUnordered<<S::Item as IntoFuture>::Future>,
+    max: usize,
+}
+
+impl<S> fmt::Debug for BufferUnordered<S>
+    where S: Stream + fmt::Debug,
+          S::Item: IntoFuture,
+          <<S as Stream>::Item as IntoFuture>::Future: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("BufferUnordered")
+            .field("stream", &self.stream)
+            .field("queue", &self.queue)
+            .field("max", &self.max)
+            .finish()
+    }
+}
+
+pub fn new<S>(s: S, amt: usize) -> BufferUnordered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    BufferUnordered {
+        stream: super::fuse::new(s),
+        queue: FuturesUnordered::new(),
+        max: amt,
+    }
+}
+
+impl<S> BufferUnordered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        self.stream.get_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.stream.get_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream.into_inner()
+    }
+}
+
+impl<S> Stream for BufferUnordered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    type Item = <S::Item as IntoFuture>::Item;
+    type Error = <S as Stream>::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        // First up, try to spawn off as many futures as possible by filling up
+        // our slab of futures.
+        while self.queue.len() < self.max {
+            let future = match self.stream.poll()? {
+                Async::Ready(Some(s)) => s.into_future(),
+                Async::Ready(None) |
+                Async::NotReady => break,
+            };
+
+            self.queue.push(future);
+        }
+
+        // Try polling a new future
+        if let Some(val) = try_ready!(self.queue.poll()) {
+            return Ok(Async::Ready(Some(val)));
+        }
+
+        // If we've gotten this far, then there are no events for us to process
+        // and nothing was ready, so figure out if we're not done yet or if
+        // we've reached the end.
+        if self.stream.is_done() {
+            Ok(Async::Ready(None))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for BufferUnordered<S>
+    where S: ::sink::Sink + Stream,
+          S::Item: IntoFuture,
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
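
An illustrative sketch of `buffer_unordered` (not part of the vendored sources; uses `stream::iter_ok` and `future::ok` for brevity):

extern crate futures;

use futures::{Future, Stream, future, stream};

fn main() {
    // At most two of the yielded futures are polled at a time; completed
    // results are handed back as soon as they are ready, in any order.
    let mut results = stream::iter_ok::<_, ()>(vec![1, 2, 3, 4])
        .map(|n| future::ok::<_, ()>(n * n))
        .buffer_unordered(2)
        .collect()
        .wait()
        .unwrap();
    results.sort();
    assert_eq!(results, vec![1, 4, 9, 16]);
}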
diff --git a/rustc_deps/vendor/futures/src/stream/buffered.rs b/rustc_deps/vendor/futures/src/stream/buffered.rs
new file mode 100644
index 0000000..5616b73
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/buffered.rs
@@ -0,0 +1,132 @@
+use std::fmt;
+
+use {Async, IntoFuture, Poll};
+use stream::{Stream, Fuse, FuturesOrdered};
+
+/// An adaptor for a stream of futures to execute the futures concurrently, if
+/// possible.
+///
+/// This adaptor will buffer up a list of pending futures, and then return their
+/// results in the order that they were pulled out of the original stream. This
+/// is created by the `Stream::buffered` method.
+#[must_use = "streams do nothing unless polled"]
+pub struct Buffered<S>
+    where S: Stream,
+          S::Item: IntoFuture,
+{
+    stream: Fuse<S>,
+    queue: FuturesOrdered<<S::Item as IntoFuture>::Future>,
+    max: usize,
+}
+
+impl<S> fmt::Debug for Buffered<S>
+    where S: Stream + fmt::Debug,
+          S::Item: IntoFuture,
+          <<S as Stream>::Item as IntoFuture>::Future: fmt::Debug,
+          <<S as Stream>::Item as IntoFuture>::Item: fmt::Debug,
+          <<S as Stream>::Item as IntoFuture>::Error: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Buffered")
+            .field("stream", &self.stream)
+            .field("queue", &self.queue)
+            .field("max", &self.max)
+            .finish()
+    }
+}
+
+pub fn new<S>(s: S, amt: usize) -> Buffered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    Buffered {
+        stream: super::fuse::new(s),
+        queue: FuturesOrdered::new(),
+        max: amt,
+    }
+}
+
+impl<S> Buffered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        self.stream.get_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.stream.get_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream.into_inner()
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Buffered<S>
+    where S: ::sink::Sink + Stream,
+          S::Item: IntoFuture,
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S> Stream for Buffered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    type Item = <S::Item as IntoFuture>::Item;
+    type Error = <S as Stream>::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        // First up, try to spawn off as many futures as possible by filling up
+        // our slab of futures.
+        while self.queue.len() < self.max {
+            let future = match self.stream.poll()? {
+                Async::Ready(Some(s)) => s.into_future(),
+                Async::Ready(None) |
+                Async::NotReady => break,
+            };
+
+            self.queue.push(future);
+        }
+
+        // Try polling a new future
+        if let Some(val) = try_ready!(self.queue.poll()) {
+            return Ok(Async::Ready(Some(val)));
+        }
+
+        // If we've gotten this far, then there are no events for us to process
+        // and nothing was ready, so figure out if we're not done yet or if
+        // we've reached the end.
+        if self.stream.is_done() {
+            Ok(Async::Ready(None))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
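
And the ordered counterpart, `buffered` (illustration only, same assumptions as the sketch above):

extern crate futures;

use futures::{Future, Stream, future, stream};

fn main() {
    // Same idea as buffer_unordered, but results are yielded in the order the
    // futures were pulled from the stream.
    let results = stream::iter_ok::<_, ()>(vec![1, 2, 3])
        .map(|n| future::ok::<_, ()>(n * 10))
        .buffered(2)
        .collect()
        .wait()
        .unwrap();
    assert_eq!(results, vec![10, 20, 30]);
}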
diff --git a/rustc_deps/vendor/futures/src/stream/catch_unwind.rs b/rustc_deps/vendor/futures/src/stream/catch_unwind.rs
new file mode 100644
index 0000000..d324494
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/catch_unwind.rs
@@ -0,0 +1,71 @@
+use std::prelude::v1::*;
+use std::any::Any;
+use std::panic::{catch_unwind, UnwindSafe, AssertUnwindSafe};
+use std::mem;
+
+use super::super::{Poll, Async};
+use super::Stream;
+
+/// Stream for the `catch_unwind` combinator.
+///
+/// This is created by the `Stream::catch_unwind` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct CatchUnwind<S> where S: Stream {
+    state: CatchUnwindState<S>,
+}
+
+pub fn new<S>(stream: S) -> CatchUnwind<S>
+    where S: Stream + UnwindSafe,
+{
+    CatchUnwind {
+        state: CatchUnwindState::Stream(stream),
+    }
+}
+
+#[derive(Debug)]
+enum CatchUnwindState<S> {
+    Stream(S),
+    Eof,
+    Done,
+}
+
+impl<S> Stream for CatchUnwind<S>
+    where S: Stream + UnwindSafe,
+{
+    type Item = Result<S::Item, S::Error>;
+    type Error = Box<Any + Send>;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        let mut stream = match mem::replace(&mut self.state, CatchUnwindState::Eof) {
+            CatchUnwindState::Done => panic!("cannot poll after eof"),
+            CatchUnwindState::Eof => {
+                self.state = CatchUnwindState::Done;
+                return Ok(Async::Ready(None));
+            }
+            CatchUnwindState::Stream(stream) => stream,
+        };
+        let res = catch_unwind(|| (stream.poll(), stream));
+        match res {
+            Err(e) => Err(e), // and state is already Eof
+            Ok((poll, stream)) => {
+                self.state = CatchUnwindState::Stream(stream);
+                match poll {
+                    Err(e) => Ok(Async::Ready(Some(Err(e)))),
+                    Ok(Async::NotReady) => Ok(Async::NotReady),
+                    Ok(Async::Ready(Some(r))) => Ok(Async::Ready(Some(Ok(r)))),
+                    Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+                }
+            }
+        }
+    }
+}
+
+impl<S: Stream> Stream for AssertUnwindSafe<S> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.0.poll()
+    }
+}
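
A sketch of `catch_unwind` behaviour (illustration only, not part of the vendored diff; assumes `stream::iter_ok` and the blocking `Stream::wait` iterator):

extern crate futures;

use futures::{Stream, stream};

fn main() {
    // A panic while polling is caught and surfaced as an Err item; the default
    // panic hook still prints the message to stderr.
    let noisy = stream::iter_ok::<_, ()>(vec![1, 2])
        .map(|n| if n == 2 { panic!("boom") } else { n });
    let mut iter = noisy.catch_unwind().wait();
    assert_eq!(iter.next().unwrap().unwrap(), Ok(1)); // ordinary item, re-wrapped
    assert!(iter.next().unwrap().is_err());           // the caught panic
    assert!(iter.next().is_none());                   // stream ends after the panic
}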
diff --git a/rustc_deps/vendor/futures/src/stream/chain.rs b/rustc_deps/vendor/futures/src/stream/chain.rs
new file mode 100644
index 0000000..0ff0e5c
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/chain.rs
@@ -0,0 +1,57 @@
+use core::mem;
+
+use stream::Stream;
+use {Async, Poll};
+
+
+/// State of chain stream.
+#[derive(Debug)]
+enum State<S1, S2> {
+    /// Emitting elements of first stream
+    First(S1, S2),
+    /// Emitting elements of second stream
+    Second(S2),
+    /// Temporary value to replace first with second
+    Temp,
+}
+
+/// An adapter for chaining the output of two streams.
+///
+/// The resulting stream produces items from the first stream and then
+/// from the second stream.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Chain<S1, S2> {
+    state: State<S1, S2>
+}
+
+pub fn new<S1, S2>(s1: S1, s2: S2) -> Chain<S1, S2>
+    where S1: Stream, S2: Stream<Item=S1::Item, Error=S1::Error>,
+{
+    Chain { state: State::First(s1, s2) }
+}
+
+impl<S1, S2> Stream for Chain<S1, S2>
+    where S1: Stream, S2: Stream<Item=S1::Item, Error=S1::Error>,
+{
+    type Item = S1::Item;
+    type Error = S1::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        loop {
+            match self.state {
+                State::First(ref mut s1, ref _s2) => match s1.poll() {
+                    Ok(Async::Ready(None)) => (), // roll
+                    x => return x,
+                },
+                State::Second(ref mut s2) => return s2.poll(),
+                State::Temp => unreachable!(),
+            }
+
+            self.state = match mem::replace(&mut self.state, State::Temp) {
+                State::First(_s1, s2) => State::Second(s2),
+                _ => unreachable!(),
+            };
+        }
+    }
+}
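
A minimal `chain` sketch (not part of the vendored sources; uses `stream::iter_ok` and blocking `wait()`):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    let first = stream::iter_ok::<_, ()>(vec![1, 2]);
    let second = stream::iter_ok::<_, ()>(vec![3]);
    // All of `first`, then all of `second`.
    let all = first.chain(second).collect().wait().unwrap();
    assert_eq!(all, vec![1, 2, 3]);
}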
diff --git a/rustc_deps/vendor/futures/src/stream/channel.rs b/rustc_deps/vendor/futures/src/stream/channel.rs
new file mode 100644
index 0000000..89a419d
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/channel.rs
@@ -0,0 +1,114 @@
+#![cfg(feature = "with-deprecated")]
+#![deprecated(since = "0.1.4", note = "use sync::mpsc::channel instead")]
+#![allow(deprecated)]
+
+use std::any::Any;
+use std::error::Error;
+use std::fmt;
+
+use {Poll, Async, Stream, Future, Sink};
+use sink::Send;
+use sync::mpsc;
+
+/// Creates an in-memory channel implementation of the `Stream` trait.
+///
+/// This method creates a concrete implementation of the `Stream` trait which
+/// can be used to send values across threads in a streaming fashion. This
+/// channel is unique in that it implements back pressure to ensure that the
+/// sender never outpaces the receiver. The `Sender::send` method will only
+/// allow sending one message and the next message can only be sent once the
+/// first was consumed.
+///
+/// The `Receiver` returned implements the `Stream` trait and has access to any
+/// number of the associated combinators for transforming the result.
+pub fn channel<T, E>() -> (Sender<T, E>, Receiver<T, E>) {
+    let (tx, rx) = mpsc::channel(0);
+    (Sender { inner: tx }, Receiver { inner: rx })
+}
+
+/// The transmission end of a channel which is used to send values.
+///
+/// This is created by the `channel` method in the `stream` module.
+#[derive(Debug)]
+pub struct Sender<T, E> {
+    inner: mpsc::Sender<Result<T, E>>,
+}
+
+/// The receiving end of a channel which implements the `Stream` trait.
+///
+/// This is a concrete implementation of a stream which can be used to represent
+/// a stream of values being computed elsewhere. This is created by the
+/// `channel` method in the `stream` module.
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub struct Receiver<T, E> {
+    inner: mpsc::Receiver<Result<T, E>>,
+}
+
+/// Error type for sending, used when the receiving end of the channel is dropped
+pub struct SendError<T, E>(Result<T, E>);
+
+/// Future returned by `Sender::send`.
+#[derive(Debug)]
+pub struct FutureSender<T, E> {
+    inner: Send<mpsc::Sender<Result<T, E>>>,
+}
+
+impl<T, E> fmt::Debug for SendError<T, E> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_tuple("SendError")
+            .field(&"...")
+            .finish()
+    }
+}
+
+impl<T, E> fmt::Display for SendError<T, E> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "send failed because receiver is gone")
+    }
+}
+
+impl<T, E> Error for SendError<T, E>
+    where T: Any, E: Any
+{
+    fn description(&self) -> &str {
+        "send failed because receiver is gone"
+    }
+}
+
+
+impl<T, E> Stream for Receiver<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        match self.inner.poll().expect("cannot fail") {
+            Async::Ready(Some(Ok(e))) => Ok(Async::Ready(Some(e))),
+            Async::Ready(Some(Err(e))) => Err(e),
+            Async::Ready(None) => Ok(Async::Ready(None)),
+            Async::NotReady => Ok(Async::NotReady),
+        }
+    }
+}
+
+impl<T, E> Sender<T, E> {
+    /// Sends a new value along this channel to the receiver.
+    ///
+    /// This method consumes the sender and returns a future which will resolve
+    /// to the sender again when the value sent has been consumed.
+    pub fn send(self, t: Result<T, E>) -> FutureSender<T, E> {
+        FutureSender { inner: self.inner.send(t) }
+    }
+}
+
+impl<T, E> Future for FutureSender<T, E> {
+    type Item = Sender<T, E>;
+    type Error = SendError<T, E>;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        match self.inner.poll() {
+            Ok(a) => Ok(a.map(|a| Sender { inner: a })),
+            Err(e) => Err(SendError(e.into_inner())),
+        }
+    }
+}
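
Since this module is deprecated in favour of `sync::mpsc`, here is a sketch of the replacement pattern instead (illustration only; using a `Result` item type roughly recovers the old `Sender<T, E>`/`Receiver<T, E>` shape, though the back-pressure rules differ):

extern crate futures;

use futures::{Future, Sink, Stream};
use futures::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<Result<u32, String>>(2);
    // Each send consumes the sender and yields it back once the message has
    // been queued, loosely mirroring the old FutureSender behaviour.
    let tx = tx.send(Ok(1)).wait().unwrap();
    tx.send(Err("done".to_string())).wait().unwrap();
    let got = rx.collect().wait().unwrap();
    assert_eq!(got, vec![Ok(1), Err("done".to_string())]);
}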
diff --git a/rustc_deps/vendor/futures/src/stream/chunks.rs b/rustc_deps/vendor/futures/src/stream/chunks.rs
new file mode 100644
index 0000000..3a361eb
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/chunks.rs
@@ -0,0 +1,135 @@
+use std::mem;
+use std::prelude::v1::*;
+
+use {Async, Poll};
+use stream::{Stream, Fuse};
+
+/// An adaptor that chunks up elements in a vector.
+///
+/// This adaptor will buffer up a list of items in the stream and pass on the
+/// vector used for buffering when a specified capacity has been reached. This
+/// is created by the `Stream::chunks` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Chunks<S>
+    where S: Stream
+{
+    items: Vec<S::Item>,
+    err: Option<S::Error>,
+    stream: Fuse<S>
+}
+
+pub fn new<S>(s: S, capacity: usize) -> Chunks<S>
+    where S: Stream
+{
+    assert!(capacity > 0);
+
+    Chunks {
+        items: Vec::with_capacity(capacity),
+        err: None,
+        stream: super::fuse::new(s),
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Chunks<S>
+    where S: ::sink::Sink + Stream
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+
+impl<S> Chunks<S> where S: Stream {
+    fn take(&mut self) -> Vec<S::Item> {
+        let cap = self.items.capacity();
+        mem::replace(&mut self.items, Vec::with_capacity(cap))
+    }
+
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        self.stream.get_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.stream.get_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream.into_inner()
+    }
+}
+
+impl<S> Stream for Chunks<S>
+    where S: Stream
+{
+    type Item = Vec<<S as Stream>::Item>;
+    type Error = <S as Stream>::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        if let Some(err) = self.err.take() {
+            return Err(err)
+        }
+
+        let cap = self.items.capacity();
+        loop {
+            match self.stream.poll() {
+                Ok(Async::NotReady) => return Ok(Async::NotReady),
+
+                // Push the item into the buffer and check whether it is full.
+                // If so, replace our buffer with a new and empty one and return
+                // the full one.
+                Ok(Async::Ready(Some(item))) => {
+                    self.items.push(item);
+                    if self.items.len() >= cap {
+                        return Ok(Some(self.take()).into())
+                    }
+                }
+
+                // Since the underlying stream ran out of values, return what we
+                // have buffered, if we have anything.
+                Ok(Async::Ready(None)) => {
+                    return if self.items.len() > 0 {
+                        let full_buf = mem::replace(&mut self.items, Vec::new());
+                        Ok(Some(full_buf).into())
+                    } else {
+                        Ok(Async::Ready(None))
+                    }
+                }
+
+                // If we've got buffered items be sure to return them first,
+                // we'll defer our error for later.
+                Err(e) => {
+                    if self.items.len() == 0 {
+                        return Err(e)
+                    } else {
+                        self.err = Some(e);
+                        return Ok(Some(self.take()).into())
+                    }
+                }
+            }
+        }
+    }
+}
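
A small `chunks` sketch (not part of the vendored diff; assumes `stream::iter_ok` and blocking `wait()`):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    // Group items into Vecs of up to three; the final chunk may be shorter.
    let chunks = stream::iter_ok::<_, ()>(0..7)
        .chunks(3)
        .collect()
        .wait()
        .unwrap();
    assert_eq!(chunks, vec![vec![0, 1, 2], vec![3, 4, 5], vec![6]]);
}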
diff --git a/rustc_deps/vendor/futures/src/stream/collect.rs b/rustc_deps/vendor/futures/src/stream/collect.rs
new file mode 100644
index 0000000..8bd9d0e
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/collect.rs
@@ -0,0 +1,52 @@
+use std::prelude::v1::*;
+
+use std::mem;
+
+use {Future, Poll, Async};
+use stream::Stream;
+
+/// A future which collects all of the values of a stream into a vector.
+///
+/// This future is created by the `Stream::collect` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Collect<S> where S: Stream {
+    stream: S,
+    items: Vec<S::Item>,
+}
+
+pub fn new<S>(s: S) -> Collect<S>
+    where S: Stream,
+{
+    Collect {
+        stream: s,
+        items: Vec::new(),
+    }
+}
+
+impl<S: Stream> Collect<S> {
+    fn finish(&mut self) -> Vec<S::Item> {
+        mem::replace(&mut self.items, Vec::new())
+    }
+}
+
+impl<S> Future for Collect<S>
+    where S: Stream,
+{
+    type Item = Vec<S::Item>;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Vec<S::Item>, S::Error> {
+        loop {
+            match self.stream.poll() {
+                Ok(Async::Ready(Some(e))) => self.items.push(e),
+                Ok(Async::Ready(None)) => return Ok(Async::Ready(self.finish())),
+                Ok(Async::NotReady) => return Ok(Async::NotReady),
+                Err(e) => {
+                    self.finish();
+                    return Err(e)
+                }
+            }
+        }
+    }
+}
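
The `collect` future itself, used directly (illustration only):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    // Drive the stream to completion, gathering every item into one Vec.
    let items = stream::iter_ok::<_, ()>(vec!["a", "b", "c"])
        .collect()
        .wait()
        .unwrap();
    assert_eq!(items, vec!["a", "b", "c"]);
}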
diff --git a/rustc_deps/vendor/futures/src/stream/concat.rs b/rustc_deps/vendor/futures/src/stream/concat.rs
new file mode 100644
index 0000000..aae5623
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/concat.rs
@@ -0,0 +1,172 @@
+use core::mem;
+use core::fmt::{Debug, Formatter, Result as FmtResult};
+use core::default::Default;
+
+use {Poll, Async};
+use future::Future;
+use stream::Stream;
+
+/// A stream combinator to concatenate the results of a stream into the first
+/// yielded item.
+///
+/// This structure is produced by the `Stream::concat` method.
+#[must_use = "streams do nothing unless polled"]
+pub struct Concat2<S>
+    where S: Stream,
+{
+    inner: ConcatSafe<S>
+}
+
+impl<S: Debug> Debug for Concat2<S> where S: Stream, S::Item: Debug {
+    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
+        fmt.debug_struct("Concat2")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
+pub fn new2<S>(s: S) -> Concat2<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+{
+    Concat2 {
+        inner: new_safe(s)
+    }
+}
+
+impl<S> Future for Concat2<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        self.inner.poll().map(|a| {
+            match a {
+                Async::NotReady => Async::NotReady,
+                Async::Ready(None) => Async::Ready(Default::default()),
+                Async::Ready(Some(e)) => Async::Ready(e)
+            }
+        })
+    }
+}
+
+
+/// A stream combinator to concatenate the results of a stream into the first
+/// yielded item.
+///
+/// This structure is produced by the `Stream::concat` method.
+#[deprecated(since="0.1.18", note="please use `Stream::Concat2` instead")]
+#[must_use = "streams do nothing unless polled"]
+pub struct Concat<S>
+    where S: Stream,
+{
+    inner: ConcatSafe<S>
+}
+
+#[allow(deprecated)]
+impl<S: Debug> Debug for Concat<S> where S: Stream, S::Item: Debug {
+    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
+        fmt.debug_struct("Concat")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
+#[allow(deprecated)]
+pub fn new<S>(s: S) -> Concat<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+{
+    Concat {
+        inner: new_safe(s)
+    }
+}
+
+#[allow(deprecated)]
+impl<S> Future for Concat<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        self.inner.poll().map(|a| {
+            match a {
+                Async::NotReady => Async::NotReady,
+                Async::Ready(None) => panic!("attempted concatenation of empty stream"),
+                Async::Ready(Some(e)) => Async::Ready(e)
+            }
+        })
+    }
+}
+
+
+#[derive(Debug)]
+struct ConcatSafe<S>
+    where S: Stream,
+{
+    stream: S,
+    extend: Inner<S::Item>,
+}
+
+fn new_safe<S>(s: S) -> ConcatSafe<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+{
+    ConcatSafe {
+        stream: s,
+        extend: Inner::First,
+    }
+}
+
+impl<S> Future for ConcatSafe<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+
+{
+    type Item = Option<S::Item>;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        loop {
+            match self.stream.poll() {
+                Ok(Async::Ready(Some(i))) => {
+                    match self.extend {
+                        Inner::First => {
+                            self.extend = Inner::Extending(i);
+                        },
+                        Inner::Extending(ref mut e) => {
+                            e.extend(i);
+                        },
+                        Inner::Done => unreachable!(),
+                    }
+                },
+                Ok(Async::Ready(None)) => {
+                    match mem::replace(&mut self.extend, Inner::Done) {
+                        Inner::First => return Ok(Async::Ready(None)),
+                        Inner::Extending(e) => return Ok(Async::Ready(Some(e))),
+                        Inner::Done => panic!("cannot poll Concat again")
+                    }
+                },
+                Ok(Async::NotReady) => return Ok(Async::NotReady),
+                Err(e) => {
+                    self.extend = Inner::Done;
+                    return Err(e)
+                }
+            }
+        }
+    }
+}
+
+
+#[derive(Debug)]
+enum Inner<E> {
+    First,
+    Extending(E),
+    Done,
+}
\ No newline at end of file
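
A sketch of the non-deprecated `concat2` entry point (not part of the vendored sources; assumes `stream::iter_ok` and blocking `wait()`):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    // Concat2 extends the first item with every later one; an empty stream
    // yields Default::default() rather than panicking like the older Concat.
    let joined = stream::iter_ok::<_, ()>(vec![vec![1, 2], vec![3], vec![]])
        .concat2()
        .wait()
        .unwrap();
    assert_eq!(joined, vec![1, 2, 3]);
}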
diff --git a/rustc_deps/vendor/futures/src/stream/empty.rs b/rustc_deps/vendor/futures/src/stream/empty.rs
new file mode 100644
index 0000000..c53fb80
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/empty.rs
@@ -0,0 +1,29 @@
+use core::marker;
+
+use stream::Stream;
+use {Poll, Async};
+
+/// A stream which contains no elements.
+///
+/// This stream can be created with the `stream::empty` function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Empty<T, E> {
+    _data: marker::PhantomData<(T, E)>,
+}
+
+/// Creates a stream which contains no elements.
+///
+/// The returned stream will always return `Ready(None)` when polled.
+pub fn empty<T, E>() -> Empty<T, E> {
+    Empty { _data: marker::PhantomData }
+}
+
+impl<T, E> Stream for Empty<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        Ok(Async::Ready(None))
+    }
+}
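
For completeness, a one-liner sketch of `stream::empty` (illustration only):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    // An empty stream terminates immediately with no items.
    let items = stream::empty::<u32, ()>().collect().wait().unwrap();
    assert!(items.is_empty());
}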
diff --git a/rustc_deps/vendor/futures/src/stream/filter.rs b/rustc_deps/vendor/futures/src/stream/filter.rs
new file mode 100644
index 0000000..99c4abd
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/filter.rs
@@ -0,0 +1,89 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream combinator used to filter the results of a stream and only yield
+/// some values.
+///
+/// This structure is produced by the `Stream::filter` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Filter<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F>(s: S, f: F) -> Filter<S, F>
+    where S: Stream,
+          F: FnMut(&S::Item) -> bool,
+{
+    Filter {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F> Filter<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for Filter<S, F>
+    where S: ::sink::Sink
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F> Stream for Filter<S, F>
+    where S: Stream,
+          F: FnMut(&S::Item) -> bool,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        loop {
+            match try_ready!(self.stream.poll()) {
+                Some(e) => {
+                    if (self.f)(&e) {
+                        return Ok(Async::Ready(Some(e)))
+                    }
+                }
+                None => return Ok(Async::Ready(None)),
+            }
+        }
+    }
+}
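
A minimal `filter` sketch (not part of the vendored diff; assumes `stream::iter_ok` and blocking `wait()`):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    // Only items for which the predicate returns true are passed through.
    let evens = stream::iter_ok::<_, ()>(vec![1, 2, 3, 4, 5, 6])
        .filter(|n| n % 2 == 0)
        .collect()
        .wait()
        .unwrap();
    assert_eq!(evens, vec![2, 4, 6]);
}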
diff --git a/rustc_deps/vendor/futures/src/stream/filter_map.rs b/rustc_deps/vendor/futures/src/stream/filter_map.rs
new file mode 100644
index 0000000..f91d26a
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/filter_map.rs
@@ -0,0 +1,89 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A combinator used to filter the results of a stream and simultaneously map
+/// them to a different type.
+///
+/// This structure is returned by the `Stream::filter_map` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct FilterMap<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F, B>(s: S, f: F) -> FilterMap<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> Option<B>,
+{
+    FilterMap {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F> FilterMap<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for FilterMap<S, F>
+    where S: ::sink::Sink
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F, B> Stream for FilterMap<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> Option<B>,
+{
+    type Item = B;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<B>, S::Error> {
+        loop {
+            match try_ready!(self.stream.poll()) {
+                Some(e) => {
+                    if let Some(e) = (self.f)(e) {
+                        return Ok(Async::Ready(Some(e)))
+                    }
+                }
+                None => return Ok(Async::Ready(None)),
+            }
+        }
+    }
+}
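
And the filter-plus-map variant, `filter_map` (illustration only, same assumptions):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    // Parse each item, silently dropping the ones that fail, in one pass.
    let numbers = stream::iter_ok::<_, ()>(vec!["1", "two", "3"])
        .filter_map(|s| s.parse::<i32>().ok())
        .collect()
        .wait()
        .unwrap();
    assert_eq!(numbers, vec![1, 3]);
}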
diff --git a/rustc_deps/vendor/futures/src/stream/flatten.rs b/rustc_deps/vendor/futures/src/stream/flatten.rs
new file mode 100644
index 0000000..4baf904
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/flatten.rs
@@ -0,0 +1,96 @@
+use {Poll, Async};
+use stream::Stream;
+
+/// A combinator used to flatten a stream-of-streams into one long stream of
+/// elements.
+///
+/// This combinator is created by the `Stream::flatten` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Flatten<S>
+    where S: Stream,
+{
+    stream: S,
+    next: Option<S::Item>,
+}
+
+pub fn new<S>(s: S) -> Flatten<S>
+    where S: Stream,
+          S::Item: Stream,
+          <S::Item as Stream>::Error: From<S::Error>,
+{
+    Flatten {
+        stream: s,
+        next: None,
+    }
+}
+
+impl<S: Stream> Flatten<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Flatten<S>
+    where S: ::sink::Sink + Stream
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S> Stream for Flatten<S>
+    where S: Stream,
+          S::Item: Stream,
+          <S::Item as Stream>::Error: From<S::Error>,
+{
+    type Item = <S::Item as Stream>::Item;
+    type Error = <S::Item as Stream>::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        loop {
+            if self.next.is_none() {
+                match try_ready!(self.stream.poll()) {
+                    Some(e) => self.next = Some(e),
+                    None => return Ok(Async::Ready(None)),
+                }
+            }
+            assert!(self.next.is_some());
+            match self.next.as_mut().unwrap().poll() {
+                Ok(Async::Ready(None)) => self.next = None,
+                other => return other,
+            }
+        }
+    }
+}
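
A short `flatten` sketch (not part of the vendored sources; uses `stream::iter_ok` and blocking `wait()`):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    // A stream of streams becomes one long stream of their elements.
    let inner = vec![
        stream::iter_ok::<_, ()>(vec![1, 2]),
        stream::iter_ok::<_, ()>(vec![3]),
    ];
    let flat = stream::iter_ok::<_, ()>(inner)
        .flatten()
        .collect()
        .wait()
        .unwrap();
    assert_eq!(flat, vec![1, 2, 3]);
}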
diff --git a/rustc_deps/vendor/futures/src/stream/fold.rs b/rustc_deps/vendor/futures/src/stream/fold.rs
new file mode 100644
index 0000000..7fa24b4
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/fold.rs
@@ -0,0 +1,81 @@
+use core::mem;
+
+use {Future, Poll, IntoFuture, Async};
+use stream::Stream;
+
+/// A future used to collect all the results of a stream into one generic type.
+///
+/// This future is returned by the `Stream::fold` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Fold<S, F, Fut, T> where Fut: IntoFuture {
+    stream: S,
+    f: F,
+    state: State<T, Fut::Future>,
+}
+
+#[derive(Debug)]
+enum State<T, F> where F: Future {
+    /// Placeholder state when doing work
+    Empty,
+
+    /// Ready to process the next stream item; current accumulator is the `T`
+    Ready(T),
+
+    /// Working on a future to process the previous stream item
+    Processing(F),
+}
+
+pub fn new<S, F, Fut, T>(s: S, f: F, t: T) -> Fold<S, F, Fut, T>
+    where S: Stream,
+          F: FnMut(T, S::Item) -> Fut,
+          Fut: IntoFuture<Item = T>,
+          S::Error: From<Fut::Error>,
+{
+    Fold {
+        stream: s,
+        f: f,
+        state: State::Ready(t),
+    }
+}
+
+impl<S, F, Fut, T> Future for Fold<S, F, Fut, T>
+    where S: Stream,
+          F: FnMut(T, S::Item) -> Fut,
+          Fut: IntoFuture<Item = T>,
+          S::Error: From<Fut::Error>,
+{
+    type Item = T;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<T, S::Error> {
+        loop {
+            match mem::replace(&mut self.state, State::Empty) {
+                State::Empty => panic!("cannot poll Fold twice"),
+                State::Ready(state) => {
+                    match self.stream.poll()? {
+                        Async::Ready(Some(e)) => {
+                            let future = (self.f)(state, e);
+                            let future = future.into_future();
+                            self.state = State::Processing(future);
+                        }
+                        Async::Ready(None) => return Ok(Async::Ready(state)),
+                        Async::NotReady => {
+                            self.state = State::Ready(state);
+                            return Ok(Async::NotReady)
+                        }
+                    }
+                }
+                State::Processing(mut fut) => {
+                    match fut.poll()? {
+                        Async::Ready(state) => self.state = State::Ready(state),
+                        Async::NotReady => {
+                            self.state = State::Processing(fut);
+                            return Ok(Async::NotReady)
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
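
A minimal `fold` sketch (illustration only; assumes `stream::iter_ok`, `future::ok`, and blocking `wait()`):

extern crate futures;

use futures::{Future, Stream, future, stream};

fn main() {
    // Accumulate a sum, where each accumulation step is itself a future.
    let sum = stream::iter_ok::<_, ()>(vec![1, 2, 3])
        .fold(0, |acc, n| future::ok::<_, ()>(acc + n))
        .wait()
        .unwrap();
    assert_eq!(sum, 6);
}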
diff --git a/rustc_deps/vendor/futures/src/stream/for_each.rs b/rustc_deps/vendor/futures/src/stream/for_each.rs
new file mode 100644
index 0000000..c7e1cde
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/for_each.rs
@@ -0,0 +1,51 @@
+use {Async, Future, IntoFuture, Poll};
+use stream::Stream;
+
+/// A stream combinator which executes a unit closure over each item on a
+/// stream.
+///
+/// This structure is returned by the `Stream::for_each` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct ForEach<S, F, U> where U: IntoFuture {
+    stream: S,
+    f: F,
+    fut: Option<U::Future>,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> ForEach<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Item) -> U,
+          U: IntoFuture<Item = (), Error = S::Error>,
+{
+    ForEach {
+        stream: s,
+        f: f,
+        fut: None,
+    }
+}
+
+impl<S, F, U> Future for ForEach<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Item) -> U,
+          U: IntoFuture<Item= (), Error = S::Error>,
+{
+    type Item = ();
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<(), S::Error> {
+        loop {
+            if let Some(mut fut) = self.fut.take() {
+                if fut.poll()?.is_not_ready() {
+                    self.fut = Some(fut);
+                    return Ok(Async::NotReady);
+                }
+            }
+
+            match try_ready!(self.stream.poll()) {
+                Some(e) => self.fut = Some((self.f)(e).into_future()),
+                None => return Ok(Async::Ready(())),
+            }
+        }
+    }
+}
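
A `for_each` sketch (not part of the vendored diff; same helper assumptions as above):

extern crate futures;

use futures::{Future, Stream, stream};

fn main() {
    // Run a (possibly asynchronous) side effect for every item; the returned
    // future resolves once the stream is exhausted.
    stream::iter_ok::<_, ()>(vec![1, 2, 3])
        .for_each(|n| {
            println!("got {}", n);
            Ok(())
        })
        .wait()
        .unwrap();
}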
diff --git a/rustc_deps/vendor/futures/src/stream/forward.rs b/rustc_deps/vendor/futures/src/stream/forward.rs
new file mode 100644
index 0000000..2ecde10
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/forward.rs
@@ -0,0 +1,110 @@
+use {Poll, Async, Future, AsyncSink};
+use stream::{Stream, Fuse};
+use sink::Sink;
+
+/// Future for the `Stream::forward` combinator, which sends a stream of values
+/// to a sink and then waits until the sink has fully flushed those values.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Forward<T: Stream, U> {
+    sink: Option<U>,
+    stream: Option<Fuse<T>>,
+    buffered: Option<T::Item>,
+}
+
+
+pub fn new<T, U>(stream: T, sink: U) -> Forward<T, U>
+    where U: Sink<SinkItem=T::Item>,
+          T: Stream,
+          T::Error: From<U::SinkError>,
+{
+    Forward {
+        sink: Some(sink),
+        stream: Some(stream.fuse()),
+        buffered: None,
+    }
+}
+
+impl<T, U> Forward<T, U>
+    where U: Sink<SinkItem=T::Item>,
+          T: Stream,
+          T::Error: From<U::SinkError>,
+{
+    /// Get a shared reference to the inner sink.
+    /// If this combinator has already been polled to completion, None will be returned.
+    pub fn sink_ref(&self) -> Option<&U> {
+        self.sink.as_ref()
+    }
+
+    /// Get a mutable reference to the inner sink.
+    /// If this combinator has already been polled to completion, None will be returned.
+    pub fn sink_mut(&mut self) -> Option<&mut U> {
+        self.sink.as_mut()
+    }
+
+    /// Get a shared reference to the inner stream.
+    /// If this combinator has already been polled to completion, None will be returned.
+    pub fn stream_ref(&self) -> Option<&T> {
+        self.stream.as_ref().map(|x| x.get_ref())
+    }
+
+    /// Get a mutable reference to the inner stream.
+    /// If this combinator has already been polled to completion, None will be returned.
+    pub fn stream_mut(&mut self) -> Option<&mut T> {
+        self.stream.as_mut().map(|x| x.get_mut())
+    }
+
+    fn take_result(&mut self) -> (T, U) {
+        let sink = self.sink.take()
+            .expect("Attempted to poll Forward after completion");
+        let fuse = self.stream.take()
+            .expect("Attempted to poll Forward after completion");
+        (fuse.into_inner(), sink)
+    }
+
+    fn try_start_send(&mut self, item: T::Item) -> Poll<(), U::SinkError> {
+        debug_assert!(self.buffered.is_none());
+        if let AsyncSink::NotReady(item) = self.sink_mut()
+            .take().expect("Attempted to poll Forward after completion")
+            .start_send(item)?
+        {
+            self.buffered = Some(item);
+            return Ok(Async::NotReady)
+        }
+        Ok(Async::Ready(()))
+    }
+}
+
+impl<T, U> Future for Forward<T, U>
+    where U: Sink<SinkItem=T::Item>,
+          T: Stream,
+          T::Error: From<U::SinkError>,
+{
+    type Item = (T, U);
+    type Error = T::Error;
+
+    fn poll(&mut self) -> Poll<(T, U), T::Error> {
+        // If we've got an item buffered already, we need to write it to the
+        // sink before we can do anything else
+        if let Some(item) = self.buffered.take() {
+            try_ready!(self.try_start_send(item))
+        }
+
+        loop {
+            match self.stream_mut()
+                .take().expect("Attempted to poll Forward after completion")
+                .poll()?
+            {
+                Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
+                Async::Ready(None) => {
+                    try_ready!(self.sink_mut().take().expect("Attempted to poll Forward after completion").close());
+                    return Ok(Async::Ready(self.take_result()))
+                }
+                Async::NotReady => {
+                    try_ready!(self.sink_mut().take().expect("Attempted to poll Forward after completion").poll_complete());
+                    return Ok(Async::NotReady)
+                }
+            }
+        }
+    }
+}
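
A sketch of `forward` driving a stream into a channel sink (illustration only; assumes `stream::iter_ok`, `sync::mpsc`, and blocking `wait()`):

extern crate futures;

use futures::{Future, Stream, stream};
use futures::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<i32>(4);
    let source = stream::iter_ok::<_, mpsc::SendError<i32>>(vec![1, 2, 3]);
    // forward() drives every item into the sink, flushes and closes it, and
    // then hands both halves back to the caller.
    let (_source, tx) = source.forward(tx).wait().unwrap();
    drop(tx); // make sure the receiver sees end-of-stream
    let got: Vec<i32> = rx.collect().wait().unwrap();
    assert_eq!(got, vec![1, 2, 3]);
}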
diff --git a/rustc_deps/vendor/futures/src/stream/from_err.rs b/rustc_deps/vendor/futures/src/stream/from_err.rs
new file mode 100644
index 0000000..4028542
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/from_err.rs
@@ -0,0 +1,80 @@
+use core::marker::PhantomData;
+use poll::Poll;
+use Async;
+use stream::Stream;
+
+/// A stream combinator to change the error type of a stream.
+///
+/// This is created by the `Stream::from_err` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct FromErr<S, E> {
+    stream: S,
+    f: PhantomData<E>
+}
+
+pub fn new<S, E>(stream: S) -> FromErr<S, E>
+    where S: Stream
+{
+    FromErr {
+        stream: stream,
+        f: PhantomData
+    }
+}
+
+impl<S, E> FromErr<S, E> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+
+impl<S: Stream, E: From<S::Error>> Stream for FromErr<S, E> {
+    type Item = S::Item;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, E> {
+        let e = match self.stream.poll() {
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            other => other,
+        };
+        e.map_err(From::from)
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S: Stream + ::sink::Sink, E> ::sink::Sink for FromErr<S, E> {
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: Self::SinkItem) -> ::StartSend<Self::SinkItem, Self::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        self.stream.close()
+    }
+}
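
A `from_err` sketch with a hypothetical wrapper error type (illustration only; `WrappedError` is invented here, and `stream::iter_ok`/`wait()` come from elsewhere in the crate):

extern crate futures;

use futures::{Future, Stream, stream};

#[derive(Debug)]
struct WrappedError;

impl From<()> for WrappedError {
    fn from(_: ()) -> WrappedError {
        WrappedError
    }
}

fn main() {
    // Lift the stream's error type into a richer one through `From`.
    let items = stream::iter_ok::<_, ()>(vec![1, 2])
        .from_err::<WrappedError>()
        .collect()
        .wait()
        .unwrap();
    assert_eq!(items, vec![1, 2]);
}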
diff --git a/rustc_deps/vendor/futures/src/stream/fuse.rs b/rustc_deps/vendor/futures/src/stream/fuse.rs
new file mode 100644
index 0000000..e39c31f
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/fuse.rs
@@ -0,0 +1,89 @@
+use {Poll, Async};
+use stream::Stream;
+
+/// A stream which "fuse"s a stream once it's terminated.
+///
+/// Normally streams can behave unpredictably when used after they have already
+/// finished, but `Fuse` continues to return `None` from `poll` forever when
+/// finished.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Fuse<S> {
+    stream: S,
+    done: bool,
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Fuse<S>
+    where S: ::sink::Sink
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+pub fn new<S: Stream>(s: S) -> Fuse<S> {
+    Fuse { stream: s, done: false }
+}
+
+impl<S: Stream> Stream for Fuse<S> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        if self.done {
+            Ok(Async::Ready(None))
+        } else {
+            let r = self.stream.poll();
+            if let Ok(Async::Ready(None)) = r {
+                self.done = true;
+            }
+            r
+        }
+    }
+}
+
+impl<S> Fuse<S> {
+    /// Returns whether the underlying stream has finished or not.
+    ///
+    /// If this method returns `true`, then all future calls to poll are
+    /// guaranteed to return `None`. If this returns `false`, then the
+    /// underlying stream is still in use.
+    pub fn is_done(&self) -> bool {
+        self.done
+    }
+
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/future.rs b/rustc_deps/vendor/futures/src/stream/future.rs
new file mode 100644
index 0000000..5b052ee
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/future.rs
@@ -0,0 +1,76 @@
+use {Future, Poll, Async};
+use stream::Stream;
+
+/// A combinator used to temporarily convert a stream into a future.
+///
+/// This future is returned by the `Stream::into_future` method.
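+///
+/// A brief sketch of the intended use, assuming the `iter_ok` constructor and
+/// `Future::wait` to drive the future to completion:
+///
+/// ```rust
+/// use futures::*;
+///
+/// let stream = stream::iter_ok::<_, ()>(vec![17, 19]);
+/// // Resolves to the first item together with the rest of the stream.
+/// let (item, _rest) = stream.into_future().wait().ok().unwrap();
+/// assert_eq!(Some(17), item);
+/// ```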
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct StreamFuture<S> {
+    stream: Option<S>,
+}
+
+pub fn new<S: Stream>(s: S) -> StreamFuture<S> {
+    StreamFuture { stream: Some(s) }
+}
+
+impl<S> StreamFuture<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    ///
+    /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+    /// implementation of `Future::poll` consumes the underlying stream during polling 
+    /// in order to return it to the caller of `Future::poll` if the stream yielded
+    /// an element.
+    pub fn get_ref(&self) -> Option<&S> {
+        self.stream.as_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    ///
+    /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+    /// implementation of `Future::poll` consumes the underlying stream during polling 
+    /// in order to return it to the caller of `Future::poll` if the stream yielded
+    /// an element.
+    pub fn get_mut(&mut self) -> Option<&mut S> {
+        self.stream.as_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    ///
+    /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+    /// implementation of `Future::poll` consumes the underlying stream during polling 
+    /// in order to return it to the caller of `Future::poll` if the stream yielded
+    /// an element.
+    pub fn into_inner(self) -> Option<S> {
+        self.stream
+    }
+}
+
+impl<S: Stream> Future for StreamFuture<S> {
+    type Item = (Option<S::Item>, S);
+    type Error = (S::Error, S);
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        let item = {
+            let s = self.stream.as_mut().expect("polling StreamFuture twice");
+            match s.poll() {
+                Ok(Async::NotReady) => return Ok(Async::NotReady),
+                Ok(Async::Ready(e)) => Ok(e),
+                Err(e) => Err(e),
+            }
+        };
+        let stream = self.stream.take().unwrap();
+        match item {
+            Ok(e) => Ok(Async::Ready((e, stream))),
+            Err(e) => Err((e, stream)),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/futures_ordered.rs b/rustc_deps/vendor/futures/src/stream/futures_ordered.rs
new file mode 100644
index 0000000..64e2d6f
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/futures_ordered.rs
@@ -0,0 +1,213 @@
+use std::cmp::{Eq, PartialEq, PartialOrd, Ord, Ordering};
+use std::collections::BinaryHeap;
+use std::fmt::{self, Debug};
+use std::iter::FromIterator;
+
+use {Async, Future, IntoFuture, Poll, Stream};
+use stream::FuturesUnordered;
+
+#[derive(Debug)]
+struct OrderWrapper<T> {
+    item: T,
+    index: usize,
+}
+
+impl<T> PartialEq for OrderWrapper<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.index == other.index
+    }
+}
+
+impl<T> Eq for OrderWrapper<T> {}
+
+impl<T> PartialOrd for OrderWrapper<T> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl<T> Ord for OrderWrapper<T> {
+    fn cmp(&self, other: &Self) -> Ordering {
+        // BinaryHeap is a max heap, so compare backwards here.
+        other.index.cmp(&self.index)
+    }
+}
+
+impl<T> Future for OrderWrapper<T>
+    where T: Future
+{
+    type Item = OrderWrapper<T::Item>;
+    type Error = T::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        let result = try_ready!(self.item.poll());
+        Ok(Async::Ready(OrderWrapper {
+            item: result,
+            index: self.index
+        }))
+    }
+}
+
+/// An unbounded queue of futures.
+///
+/// This "combinator" is similar to `FuturesUnordered`, but it imposes an order
+/// on top of the set of futures. While futures in the set will race to
+/// completion in parallel, results will only be returned in the order their
+/// originating futures were added to the queue.
+///
+/// Futures are pushed into this queue and their realized values are yielded in
+/// order. This structure is optimized to manage a large number of futures.
+/// Futures managed by `FuturesOrdered` will only be polled when they generate
+/// notifications. This reduces the amount of work needed to coordinate
+/// large numbers of futures.
+///
+/// When a `FuturesOrdered` is first created, it does not contain any futures.
+/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` being
+/// returned. Futures are submitted to the queue using `push`; however, the
+/// future will **not** be polled at this point. `FuturesOrdered` will only
+/// poll managed futures when `FuturesOrdered::poll` is called. As such, it
+/// is important to call `poll` after pushing new futures.
+///
+/// If `FuturesOrdered::poll` returns `Ok(Async::Ready(None))` this means that
+/// the queue is currently not managing any futures. A future may be submitted
+/// to the queue at a later time. At that point, a call to
+/// `FuturesOrdered::poll` will either return the future's resolved value
+/// **or** `Ok(Async::NotReady)` if the future has not yet completed. When
+/// multiple futures are submitted to the queue, `FuturesOrdered::poll` will
+/// return `Ok(Async::NotReady)` until the first future completes, even if
+/// some of the later futures have already completed.
+///
+/// Note that you can create a ready-made `FuturesOrdered` via the
+/// `futures_ordered` function in the `stream` module, or you can start with an
+/// empty queue with the `FuturesOrdered::new` constructor.
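+///
+/// A minimal usage sketch, assuming immediately-ready futures built with
+/// `future::ok` and `wait()` to drive the collected stream:
+///
+/// ```rust
+/// use futures::*;
+/// use futures::stream::FuturesOrdered;
+///
+/// let mut queue = FuturesOrdered::new();
+/// queue.push(future::ok::<u32, ()>(1));
+/// queue.push(future::ok::<u32, ()>(2));
+///
+/// // Results come back in push order, regardless of completion order.
+/// assert_eq!(Ok(vec![1, 2]), queue.collect().wait());
+/// ```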
+#[must_use = "streams do nothing unless polled"]
+pub struct FuturesOrdered<T>
+    where T: Future
+{
+    in_progress: FuturesUnordered<OrderWrapper<T>>,
+    queued_results: BinaryHeap<OrderWrapper<T::Item>>,
+    next_incoming_index: usize,
+    next_outgoing_index: usize,
+}
+
+/// Converts a list of futures into a `Stream` of results from the futures.
+///
+/// This function will take a list of futures (e.g. a vector, an iterator,
+/// etc), and return a stream. The stream will yield items as they become
+/// available on the futures internally, in the order that their originating
+/// futures were submitted to the queue. If the futures complete out of order,
+/// items will be stored internally within `FuturesOrdered` until all preceding
+/// items have been yielded.
+///
+/// Note that the returned queue can also be used to dynamically push more
+/// futures into the queue as they become available.
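+///
+/// For example (a sketch using immediately-ready futures):
+///
+/// ```rust
+/// use futures::*;
+/// use futures::stream::futures_ordered;
+///
+/// let stream = futures_ordered(vec![future::ok::<u32, ()>(3), future::ok(4)]);
+/// assert_eq!(Ok(vec![3, 4]), stream.collect().wait());
+/// ```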
+pub fn futures_ordered<I>(futures: I) -> FuturesOrdered<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+          I::Item: IntoFuture
+{
+    let mut queue = FuturesOrdered::new();
+
+    for future in futures {
+        queue.push(future.into_future());
+    }
+
+    return queue
+}
+
+impl<T> FuturesOrdered<T>
+    where T: Future
+{
+    /// Constructs a new, empty `FuturesOrdered`
+    ///
+    /// The returned `FuturesOrdered` does not contain any futures and, in this
+    /// state, `FuturesOrdered::poll` will return `Ok(Async::Ready(None))`.
+    pub fn new() -> FuturesOrdered<T> {
+        FuturesOrdered {
+            in_progress: FuturesUnordered::new(),
+            queued_results: BinaryHeap::new(),
+            next_incoming_index: 0,
+            next_outgoing_index: 0,
+        }
+    }
+
+    /// Returns the number of futures contained in the queue.
+    ///
+    /// This represents the total number of in-flight futures, both
+    /// those currently processing and those that have completed but
+    /// which are waiting for earlier futures to complete.
+    pub fn len(&self) -> usize {
+        self.in_progress.len() + self.queued_results.len()
+    }
+
+    /// Returns `true` if the queue contains no futures
+    pub fn is_empty(&self) -> bool {
+        self.in_progress.is_empty() && self.queued_results.is_empty()
+    }
+
+    /// Push a future into the queue.
+    ///
+    /// This function submits the given future to the internal set for managing.
+    /// This function will not call `poll` on the submitted future. The caller
+    /// must ensure that `FuturesOrdered::poll` is called in order to receive
+    /// task notifications.
+    pub fn push(&mut self, future: T) {
+        let wrapped = OrderWrapper {
+            item: future,
+            index: self.next_incoming_index,
+        };
+        self.next_incoming_index += 1;
+        self.in_progress.push(wrapped);
+    }
+}
+
+impl<T> Stream for FuturesOrdered<T>
+    where T: Future
+{
+    type Item = T::Item;
+    type Error = T::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        // Get any completed futures from the unordered set.
+        loop {
+            match self.in_progress.poll()? {
+                Async::Ready(Some(result)) => self.queued_results.push(result),
+                Async::Ready(None) | Async::NotReady => break,
+            }
+        }
+
+        if let Some(next_result) = self.queued_results.peek() {
+            // `PeekMut::pop` is not stable yet, so peek here and pop below.
+            if next_result.index != self.next_outgoing_index {
+                return Ok(Async::NotReady);
+            }
+        } else if !self.in_progress.is_empty() {
+            return Ok(Async::NotReady);
+        } else {
+            return Ok(Async::Ready(None));
+        }
+
+        let next_result = self.queued_results.pop().unwrap();
+        self.next_outgoing_index += 1;
+        Ok(Async::Ready(Some(next_result.item)))
+    }
+}
+
+impl<T: Debug> Debug for FuturesOrdered<T>
+    where T: Future
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "FuturesOrdered {{ ... }}")
+    }
+}
+
+impl<F: Future> FromIterator<F> for FuturesOrdered<F> {
+    fn from_iter<T>(iter: T) -> Self 
+        where T: IntoIterator<Item = F>
+    {
+        let mut new = FuturesOrdered::new();
+        for future in iter.into_iter() {
+            new.push(future);
+        }
+        new
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/futures_unordered.rs b/rustc_deps/vendor/futures/src/stream/futures_unordered.rs
new file mode 100644
index 0000000..2940fd3
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/futures_unordered.rs
@@ -0,0 +1,672 @@
+//! An unbounded set of futures.
+
+use std::cell::UnsafeCell;
+use std::fmt::{self, Debug};
+use std::iter::FromIterator;
+use std::marker::PhantomData;
+use std::mem;
+use std::ptr;
+use std::sync::atomic::Ordering::{Relaxed, SeqCst, Acquire, Release, AcqRel};
+use std::sync::atomic::{AtomicPtr, AtomicBool};
+use std::sync::{Arc, Weak};
+use std::usize;
+
+use {task, Stream, Future, Poll, Async};
+use executor::{Notify, UnsafeNotify, NotifyHandle};
+use task_impl::{self, AtomicTask};
+
+/// An unbounded set of futures.
+///
+/// This "combinator" also serves a special function in this library, providing
+/// the ability to maintain a set of futures and to drive them all to
+/// completion.
+///
+/// Futures are pushed into this set and their realized values are yielded as
+/// they are ready. This structure is optimized to manage a large number of
+/// futures. Futures managed by `FuturesUnordered` will only be polled when they
+/// generate notifications. This reduces the amount of work needed to
+/// coordinate large numbers of futures.
+///
+/// When a `FuturesUnordered` is first created, it does not contain any futures.
+/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` being
+/// returned. Futures are submitted to the set using `push`; however, the
+/// future will **not** be polled at this point. `FuturesUnordered` will only
+/// poll managed futures when `FuturesUnordered::poll` is called. As such, it
+/// is important to call `poll` after pushing new futures.
+///
+/// If `FuturesUnordered::poll` returns `Ok(Async::Ready(None))` this means that
+/// the set is currently not managing any futures. A future may be submitted
+/// to the set at a later time. At that point, a call to
+/// `FuturesUnordered::poll` will either return the future's resolved value
+/// **or** `Ok(Async::NotReady)` if the future has not yet completed.
+///
+/// Note that you can create a ready-made `FuturesUnordered` via the
+/// `futures_unordered` function in the `stream` module, or you can start with an
+/// empty set with the `FuturesUnordered::new` constructor.
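+///
+/// A minimal usage sketch, assuming immediately-ready futures built with
+/// `future::ok`:
+///
+/// ```rust
+/// use futures::*;
+/// use futures::stream::FuturesUnordered;
+///
+/// let mut set = FuturesUnordered::new();
+/// set.push(future::ok::<u32, ()>(2));
+/// set.push(future::ok::<u32, ()>(3));
+///
+/// // Completion order is unspecified, so sum the results rather than
+/// // comparing against an ordered `Vec`.
+/// assert_eq!(Ok(5), set.fold(0, |acc, x| Ok::<u32, ()>(acc + x)).wait());
+/// ```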
+#[must_use = "streams do nothing unless polled"]
+pub struct FuturesUnordered<F> {
+    inner: Arc<Inner<F>>,
+    len: usize,
+    head_all: *const Node<F>,
+}
+
+unsafe impl<T: Send> Send for FuturesUnordered<T> {}
+unsafe impl<T: Sync> Sync for FuturesUnordered<T> {}
+
+// FuturesUnordered is implemented using two linked lists. One which links all
+// futures managed by a `FuturesUnordered` and one that tracks futures that have
+// been scheduled for polling. The first linked list is not thread safe and is
+// only accessed by the thread that owns the `FuturesUnordered` value. The
+// second linked list is an implementation of the intrusive MPSC queue algorithm
+// described by 1024cores.net.
+//
+// When a future is submitted to the set a node is allocated and inserted in
+// both linked lists. The next call to `poll` will (eventually) see this node
+// and call `poll` on the future.
+//
+// Before a managed future is polled, the current task's `Notify` is replaced
+// with one that is aware of the specific future being run. This ensures that
+// task notifications generated by that specific future are visible to
+// `FuturesUnordered`. When a notification is received, the node is scheduled
+// for polling by being inserted into the concurrent linked list.
+//
+// Each node uses an `AtomicBool` to track whether it is currently inserted in
+// the atomic queue, while the number of outstanding handles to the node is
+// tracked by its reference count. When the future is notified, it will only
+// insert itself into the linked list if it isn't currently inserted.
+
+#[allow(missing_debug_implementations)]
+struct Inner<T> {
+    // The task using `FuturesUnordered`.
+    parent: AtomicTask,
+
+    // Head/tail of the readiness queue
+    head_readiness: AtomicPtr<Node<T>>,
+    tail_readiness: UnsafeCell<*const Node<T>>,
+    stub: Arc<Node<T>>,
+}
+
+struct Node<T> {
+    // The future
+    future: UnsafeCell<Option<T>>,
+
+    // Next pointer for linked list tracking all active nodes
+    next_all: UnsafeCell<*const Node<T>>,
+
+    // Previous node in linked list tracking all active nodes
+    prev_all: UnsafeCell<*const Node<T>>,
+
+    // Next pointer in readiness queue
+    next_readiness: AtomicPtr<Node<T>>,
+
+    // Queue that we'll be enqueued to when notified
+    queue: Weak<Inner<T>>,
+
+    // Whether or not this node is currently in the mpsc queue.
+    queued: AtomicBool,
+}
+
+enum Dequeue<T> {
+    Data(*const Node<T>),
+    Empty,
+    Inconsistent,
+}
+
+impl<T> FuturesUnordered<T>
+    where T: Future,
+{
+    /// Constructs a new, empty `FuturesUnordered`
+    ///
+    /// The returned `FuturesUnordered` does not contain any futures and, in this
+    /// state, `FuturesUnordered::poll` will return `Ok(Async::Ready(None))`.
+    pub fn new() -> FuturesUnordered<T> {
+        let stub = Arc::new(Node {
+            future: UnsafeCell::new(None),
+            next_all: UnsafeCell::new(ptr::null()),
+            prev_all: UnsafeCell::new(ptr::null()),
+            next_readiness: AtomicPtr::new(ptr::null_mut()),
+            queued: AtomicBool::new(true),
+            queue: Weak::new(),
+        });
+        let stub_ptr = &*stub as *const Node<T>;
+        let inner = Arc::new(Inner {
+            parent: AtomicTask::new(),
+            head_readiness: AtomicPtr::new(stub_ptr as *mut _),
+            tail_readiness: UnsafeCell::new(stub_ptr),
+            stub: stub,
+        });
+
+        FuturesUnordered {
+            len: 0,
+            head_all: ptr::null_mut(),
+            inner: inner,
+        }
+    }
+}
+
+impl<T> FuturesUnordered<T> {
+    /// Returns the number of futures contained in the set.
+    ///
+    /// This represents the total number of in-flight futures.
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Returns `true` if the set contains no futures
+    pub fn is_empty(&self) -> bool {
+        self.len == 0
+    }
+
+    /// Push a future into the set.
+    ///
+    /// This function submits the given future to the set for managing. This
+    /// function will not call `poll` on the submitted future. The caller must
+    /// ensure that `FuturesUnordered::poll` is called in order to receive task
+    /// notifications.
+    pub fn push(&mut self, future: T) {
+        let node = Arc::new(Node {
+            future: UnsafeCell::new(Some(future)),
+            next_all: UnsafeCell::new(ptr::null_mut()),
+            prev_all: UnsafeCell::new(ptr::null_mut()),
+            next_readiness: AtomicPtr::new(ptr::null_mut()),
+            queued: AtomicBool::new(true),
+            queue: Arc::downgrade(&self.inner),
+        });
+
+        // Right now our node has a strong reference count of 1. We transfer
+        // ownership of this reference count to our internal linked list
+        // and we'll reclaim ownership through the `unlink` function below.
+        let ptr = self.link(node);
+
+        // We'll need to get the future "into the system" to start tracking it,
+        // e.g. getting its unpark notifications routed to us so we can track
+        // which futures are ready. To do that we unconditionally enqueue it for
+        // polling here.
+        self.inner.enqueue(ptr);
+    }
+
+    /// Returns an iterator that allows modifying each future in the set.
+    pub fn iter_mut(&mut self) -> IterMut<T> {
+        IterMut {
+            node: self.head_all,
+            len: self.len,
+            _marker: PhantomData
+        }
+    }
+
+    fn release_node(&mut self, node: Arc<Node<T>>) {
+        // The future is done, so set the queued flag. This will prevent
+        // `notify` from doing any further work for this node.
+        let prev = node.queued.swap(true, SeqCst);
+
+        // Drop the future, even if it hasn't finished yet. This is safe
+        // because we're dropping the future on the thread that owns
+        // `FuturesUnordered`, which correctly tracks T's lifetimes and such.
+        unsafe {
+            drop((*node.future.get()).take());
+        }
+
+        // If the queued flag was previously set then it means that this node
+        // is still in our internal mpsc queue. We then transfer ownership
+        // of our reference count to the mpsc queue, and it'll come along and
+        // free it later, noticing that the future is `None`.
+        //
+        // If, however, the queued flag was *not* set then we're safe to
+        // release our reference count on the internal node. The queued flag
+        // was set above so all future `enqueue` operations will not actually
+        // enqueue the node, so our node will never see the mpsc queue again.
+        // The node itself will be deallocated once all reference counts have
+        // been dropped by the various owning tasks elsewhere.
+        if prev {
+            mem::forget(node);
+        }
+    }
+
+    /// Insert a new node into the internal linked list.
+    fn link(&mut self, node: Arc<Node<T>>) -> *const Node<T> {
+        let ptr = arc2ptr(node);
+        unsafe {
+            *(*ptr).next_all.get() = self.head_all;
+            if !self.head_all.is_null() {
+                *(*self.head_all).prev_all.get() = ptr;
+            }
+        }
+
+        self.head_all = ptr;
+        self.len += 1;
+        return ptr
+    }
+
+    /// Remove the node from the linked list tracking all nodes currently
+    /// managed by `FuturesUnordered`.
+    unsafe fn unlink(&mut self, node: *const Node<T>) -> Arc<Node<T>> {
+        let node = ptr2arc(node);
+        let next = *node.next_all.get();
+        let prev = *node.prev_all.get();
+        *node.next_all.get() = ptr::null_mut();
+        *node.prev_all.get() = ptr::null_mut();
+
+        if !next.is_null() {
+            *(*next).prev_all.get() = prev;
+        }
+
+        if !prev.is_null() {
+            *(*prev).next_all.get() = next;
+        } else {
+            self.head_all = next;
+        }
+        self.len -= 1;
+        return node
+    }
+}
+
+impl<T> Stream for FuturesUnordered<T>
+    where T: Future
+{
+    type Item = T::Item;
+    type Error = T::Error;
+
+    fn poll(&mut self) -> Poll<Option<T::Item>, T::Error> {
+        // Ensure `parent` is correctly set.
+        self.inner.parent.register();
+
+        loop {
+            let node = match unsafe { self.inner.dequeue() } {
+                Dequeue::Empty => {
+                    if self.is_empty() {
+                        return Ok(Async::Ready(None));
+                    } else {
+                        return Ok(Async::NotReady)
+                    }
+                }
+                Dequeue::Inconsistent => {
+                    // At this point, it may be worth yielding the thread &
+                    // spinning a few times... but for now, just yield using the
+                    // task system.
+                    task::current().notify();
+                    return Ok(Async::NotReady);
+                }
+                Dequeue::Data(node) => node,
+            };
+
+            debug_assert!(node != self.inner.stub());
+
+            unsafe {
+                let mut future = match (*(*node).future.get()).take() {
+                    Some(future) => future,
+
+                    // If the future has already gone away then we're just
+                    // cleaning out this node. See the comment in
+                    // `release_node` for more information, but we're basically
+                    // just taking ownership of our reference count here.
+                    None => {
+                        let node = ptr2arc(node);
+                        assert!((*node.next_all.get()).is_null());
+                        assert!((*node.prev_all.get()).is_null());
+                        continue
+                    }
+                };
+
+                // Unset queued flag... this must be done before
+                // polling. This ensures that the future gets
+                // rescheduled if it is notified **during** a call
+                // to `poll`.
+                let prev = (*node).queued.swap(false, SeqCst);
+                assert!(prev);
+
+                // We're going to need to be very careful if the `poll`
+                // function below panics. We need to (a) not leak memory and
+                // (b) ensure that we still don't have any use-after-frees. To
+                // manage this we do a few things:
+                //
+                // * This "bomb" here will call `release_node` if dropped
+                //   abnormally. That way we'll be sure the memory management
+                //   of the `node` is managed correctly.
+                // * The future was extracted above (taken ownership). That way
+                //   if it panics we're guaranteed that the future is
+                //   dropped on this thread and doesn't accidentally get
+                //   dropped on a different thread (bad).
+                // * We unlink the node from our internal queue to preemptively
+                //   assume it'll panic, in which case we'll want to discard it
+                //   regardless.
+                struct Bomb<'a, T: 'a> {
+                    queue: &'a mut FuturesUnordered<T>,
+                    node: Option<Arc<Node<T>>>,
+                }
+                impl<'a, T> Drop for Bomb<'a, T> {
+                    fn drop(&mut self) {
+                        if let Some(node) = self.node.take() {
+                            self.queue.release_node(node);
+                        }
+                    }
+                }
+                let mut bomb = Bomb {
+                    node: Some(self.unlink(node)),
+                    queue: self,
+                };
+
+                // Poll the underlying future with the appropriate `notify`
+                // implementation. This is where a large bit of the unsafety
+                // starts to stem from internally. The `notify` instance itself
+                // is basically just our `Arc<Node<T>>` and tracks the mpsc
+                // queue of ready futures.
+                //
+                // Critically though `Node<T>` won't actually access `T`, the
+                // future, while it's floating around inside of `Task`
+                // instances. These structs will basically just use `T` to size
+                // the internal allocation, appropriately accessing fields and
+                // deallocating the node if need be.
+                let res = {
+                    let notify = NodeToHandle(bomb.node.as_ref().unwrap());
+                    task_impl::with_notify(&notify, 0, || {
+                        future.poll()
+                    })
+                };
+
+                let ret = match res {
+                    Ok(Async::NotReady) => {
+                        let node = bomb.node.take().unwrap();
+                        *node.future.get() = Some(future);
+                        bomb.queue.link(node);
+                        continue
+                    }
+                    Ok(Async::Ready(e)) => Ok(Async::Ready(Some(e))),
+                    Err(e) => Err(e),
+                };
+                return ret
+            }
+        }
+    }
+}
+
+impl<T: Debug> Debug for FuturesUnordered<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "FuturesUnordered {{ ... }}")
+    }
+}
+
+impl<T> Drop for FuturesUnordered<T> {
+    fn drop(&mut self) {
+        // When a `FuturesUnordered` is dropped we want to drop all futures associated
+        // with it. At the same time though there may be tons of `Task` handles
+        // flying around which contain `Node<T>` references inside them. We'll
+        // let those naturally get deallocated when the `Task` itself goes out
+        // of scope or gets notified.
+        unsafe {
+            while !self.head_all.is_null() {
+                let head = self.head_all;
+                let node = self.unlink(head);
+                self.release_node(node);
+            }
+        }
+
+        // Note that at this point we could still have a bunch of nodes in the
+        // mpsc queue. None of those nodes, however, have futures associated
+        // with them so they're safe to destroy on any thread. At this point
+        // the `FuturesUnordered` struct, the owner of the one strong reference
+        // to `Inner<T>` will drop the strong reference. At that point
+        // whichever thread releases the strong refcount last (be it this
+        // thread or some other thread as part of an `upgrade`) will clear out
+        // the mpsc queue and free all remaining nodes.
+        //
+        // While that freeing operation isn't guaranteed to happen here, it's
+        // guaranteed to happen "promptly" as no more "blocking work" will
+        // happen while there's a strong refcount held.
+    }
+}
+
+impl<F: Future> FromIterator<F> for FuturesUnordered<F> {
+    fn from_iter<T>(iter: T) -> Self 
+        where T: IntoIterator<Item = F>
+    {
+        let mut new = FuturesUnordered::new();
+        for future in iter.into_iter() {
+            new.push(future);
+        }
+        new
+    }
+}
+
+#[derive(Debug)]
+/// Mutable iterator over all futures in the unordered set.
+pub struct IterMut<'a, F: 'a> {
+    node: *const Node<F>,
+    len: usize,
+    _marker: PhantomData<&'a mut FuturesUnordered<F>>
+}
+
+impl<'a, F> Iterator for IterMut<'a, F> {
+    type Item = &'a mut F;
+
+    fn next(&mut self) -> Option<&'a mut F> {
+        if self.node.is_null() {
+            return None;
+        }
+        unsafe {
+            let future = (*(*self.node).future.get()).as_mut().unwrap();
+            let next = *(*self.node).next_all.get();
+            self.node = next;
+            self.len -= 1;
+            return Some(future);
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.len, Some(self.len))
+    }
+}
+
+impl<'a, F> ExactSizeIterator for IterMut<'a, F> {}
+
+impl<T> Inner<T> {
+    /// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
+    fn enqueue(&self, node: *const Node<T>) {
+        unsafe {
+            debug_assert!((*node).queued.load(Relaxed));
+
+            // This action does not require any coordination
+            (*node).next_readiness.store(ptr::null_mut(), Relaxed);
+
+            // Note that these atomic orderings come from 1024cores
+            let node = node as *mut _;
+            let prev = self.head_readiness.swap(node, AcqRel);
+            (*prev).next_readiness.store(node, Release);
+        }
+    }
+
+    /// The dequeue function from the 1024cores intrusive MPSC queue algorithm
+    ///
+    /// Note that this is unsafe, as it requires mutual exclusion (only one
+    /// thread can call this) to be guaranteed elsewhere.
+    unsafe fn dequeue(&self) -> Dequeue<T> {
+        let mut tail = *self.tail_readiness.get();
+        let mut next = (*tail).next_readiness.load(Acquire);
+
+        if tail == self.stub() {
+            if next.is_null() {
+                return Dequeue::Empty;
+            }
+
+            *self.tail_readiness.get() = next;
+            tail = next;
+            next = (*next).next_readiness.load(Acquire);
+        }
+
+        if !next.is_null() {
+            *self.tail_readiness.get() = next;
+            debug_assert!(tail != self.stub());
+            return Dequeue::Data(tail);
+        }
+
+        if self.head_readiness.load(Acquire) as *const _ != tail {
+            return Dequeue::Inconsistent;
+        }
+
+        self.enqueue(self.stub());
+
+        next = (*tail).next_readiness.load(Acquire);
+
+        if !next.is_null() {
+            *self.tail_readiness.get() = next;
+            return Dequeue::Data(tail);
+        }
+
+        Dequeue::Inconsistent
+    }
+
+    fn stub(&self) -> *const Node<T> {
+        &*self.stub
+    }
+}
+
+impl<T> Drop for Inner<T> {
+    fn drop(&mut self) {
+        // Once we're in the destructor for `Inner<T>` we need to clear out the
+        // mpsc queue of nodes if there's anything left in there.
+        //
+        // Note that each node has a strong reference count associated with it
+        // which is owned by the mpsc queue. All nodes should have had their
+        // futures dropped already by the `FuturesUnordered` destructor above,
+        // so we're just pulling out nodes and dropping their refcounts.
+        unsafe {
+            loop {
+                match self.dequeue() {
+                    Dequeue::Empty => break,
+                    Dequeue::Inconsistent => abort("inconsistent in drop"),
+                    Dequeue::Data(ptr) => drop(ptr2arc(ptr)),
+                }
+            }
+        }
+    }
+}
+
+#[allow(missing_debug_implementations)]
+struct NodeToHandle<'a, T: 'a>(&'a Arc<Node<T>>);
+
+impl<'a, T> Clone for NodeToHandle<'a, T> {
+    fn clone(&self) -> Self {
+        NodeToHandle(self.0)
+    }
+}
+
+impl<'a, T> From<NodeToHandle<'a, T>> for NotifyHandle {
+    fn from(handle: NodeToHandle<'a, T>) -> NotifyHandle {
+        unsafe {
+            let ptr = handle.0.clone();
+            let ptr = mem::transmute::<Arc<Node<T>>, *mut ArcNode<T>>(ptr);
+            NotifyHandle::new(hide_lt(ptr))
+        }
+    }
+}
+
+struct ArcNode<T>(PhantomData<T>);
+
+// We should never touch `T` on any thread other than the one owning
+// `FuturesUnordered`, so this should be a safe operation.
+unsafe impl<T> Send for ArcNode<T> {}
+unsafe impl<T> Sync for ArcNode<T> {}
+
+impl<T> Notify for ArcNode<T> {
+    fn notify(&self, _id: usize) {
+        unsafe {
+            let me: *const ArcNode<T> = self;
+            let me: *const *const ArcNode<T> = &me;
+            let me = me as *const Arc<Node<T>>;
+            Node::notify(&*me)
+        }
+    }
+}
+
+unsafe impl<T> UnsafeNotify for ArcNode<T> {
+    unsafe fn clone_raw(&self) -> NotifyHandle {
+        let me: *const ArcNode<T> = self;
+        let me: *const *const ArcNode<T> = &me;
+        let me = &*(me as *const Arc<Node<T>>);
+        NodeToHandle(me).into()
+    }
+
+    unsafe fn drop_raw(&self) {
+        let mut me: *const ArcNode<T> = self;
+        let me = &mut me as *mut *const ArcNode<T> as *mut Arc<Node<T>>;
+        ptr::drop_in_place(me);
+    }
+}
+
+unsafe fn hide_lt<T>(p: *mut ArcNode<T>) -> *mut UnsafeNotify {
+    mem::transmute(p as *mut UnsafeNotify)
+}
+
+impl<T> Node<T> {
+    fn notify(me: &Arc<Node<T>>) {
+        let inner = match me.queue.upgrade() {
+            Some(inner) => inner,
+            None => return,
+        };
+
+        // It's our job to notify the node that it's ready to get polled,
+        // meaning that we need to enqueue it into the readiness queue. To
+        // do this we flag that we're ready to be queued, and if successful
+        // we then do the literal queueing operation, ensuring that we're
+        // only queued once.
+        //
+        // Once the node is inserted we make sure to notify the parent task,
+        // as it'll want to come along and pick up our node now.
+        //
+        // Note that we don't change the reference count of the node here,
+        // we're just enqueueing the raw pointer. The `FuturesUnordered`
+        // implementation guarantees that if we set the `queued` flag true that
+        // there's a reference count held by the main `FuturesUnordered` queue
+        // still.
+        let prev = me.queued.swap(true, SeqCst);
+        if !prev {
+            inner.enqueue(&**me);
+            inner.parent.notify();
+        }
+    }
+}
+
+impl<T> Drop for Node<T> {
+    fn drop(&mut self) {
+        // Currently a `Node<T>` is sent across all threads for any lifetime,
+        // regardless of `T`. This means that for memory safety we can't
+        // actually touch `T` at any time except when we have a reference to the
+        // `FuturesUnordered` itself.
+        //
+        // Consequently it *should* be the case that we always drop futures from
+        // the `FuturesUnordered` instance, but this is a bomb in place to catch
+        // any bugs in that logic.
+        unsafe {
+            if (*self.future.get()).is_some() {
+                abort("future still here when dropping");
+            }
+        }
+    }
+}
+
+fn arc2ptr<T>(ptr: Arc<T>) -> *const T {
+    let addr = &*ptr as *const T;
+    mem::forget(ptr);
+    return addr
+}
+
+unsafe fn ptr2arc<T>(ptr: *const T) -> Arc<T> {
+    let anchor = mem::transmute::<usize, Arc<T>>(0x10);
+    let addr = &*anchor as *const T;
+    mem::forget(anchor);
+    let offset = addr as isize - 0x10;
+    mem::transmute::<isize, Arc<T>>(ptr as isize - offset)
+}
+
+fn abort(s: &str) -> ! {
+    struct DoublePanic;
+
+    impl Drop for DoublePanic {
+        fn drop(&mut self) {
+            panic!("panicking twice to abort the program");
+        }
+    }
+
+    let _bomb = DoublePanic;
+    panic!("{}", s);
+}
diff --git a/rustc_deps/vendor/futures/src/stream/inspect.rs b/rustc_deps/vendor/futures/src/stream/inspect.rs
new file mode 100644
index 0000000..fc8f7f4
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/inspect.rs
@@ -0,0 +1,84 @@
+use {Stream, Poll, Async};
+
+/// Do something with the items of a stream, passing them on.
+///
+/// This is created by the `Stream::inspect` method.
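+///
+/// A short sketch of typical use (assuming `iter_ok` and `wait()`):
+///
+/// ```rust
+/// use futures::*;
+///
+/// let stream = stream::iter_ok::<_, ()>(vec![1, 2, 3])
+///     .inspect(|x| println!("about to yield {}", x));
+/// assert_eq!(Ok(vec![1, 2, 3]), stream.collect().wait());
+/// ```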
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Inspect<S, F> where S: Stream {
+    stream: S,
+    inspect: F,
+}
+
+pub fn new<S, F>(stream: S, f: F) -> Inspect<S, F>
+    where S: Stream,
+          F: FnMut(&S::Item) -> (),
+{
+    Inspect {
+        stream: stream,
+        inspect: f,
+    }
+}
+
+impl<S: Stream, F> Inspect<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for Inspect<S, F>
+    where S: ::sink::Sink + Stream
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F> Stream for Inspect<S, F>
+    where S: Stream,
+          F: FnMut(&S::Item),
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        match try_ready!(self.stream.poll()) {
+            Some(e) => {
+                (self.inspect)(&e);
+                Ok(Async::Ready(Some(e)))
+            }
+            None => Ok(Async::Ready(None)),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/inspect_err.rs b/rustc_deps/vendor/futures/src/stream/inspect_err.rs
new file mode 100644
index 0000000..5c56a21
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/inspect_err.rs
@@ -0,0 +1,81 @@
+use {Stream, Poll};
+
+/// Do something with the error of a stream, passing it on.
+///
+/// This is created by the `Stream::inspect_err` method.
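+///
+/// A short sketch of typical use (assuming `iter_result` and direct `poll`
+/// calls):
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter_result(vec![Ok(1), Err("oops"), Ok(2)])
+///     .inspect_err(|e| println!("saw error: {}", e));
+/// assert_eq!(Ok(Async::Ready(Some(1))), stream.poll());
+/// // The error is observed by the closure and then passed through unchanged.
+/// assert_eq!(Err("oops"), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(2))), stream.poll());
+/// ```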
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct InspectErr<S, F> where S: Stream {
+    stream: S,
+    inspect: F,
+}
+
+pub fn new<S, F>(stream: S, f: F) -> InspectErr<S, F>
+    where S: Stream,
+          F: FnMut(&S::Error) -> (),
+{
+    InspectErr {
+        stream: stream,
+        inspect: f,
+    }
+}
+
+impl<S: Stream, F> InspectErr<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for InspectErr<S, F>
+    where S: ::sink::Sink + Stream
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F> Stream for InspectErr<S, F>
+    where S: Stream,
+          F: FnMut(&S::Error),
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.stream.poll().map_err(|e| {
+            (self.inspect)(&e);
+            e
+        })
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/iter.rs b/rustc_deps/vendor/futures/src/stream/iter.rs
new file mode 100644
index 0000000..e0b9379
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/iter.rs
@@ -0,0 +1,46 @@
+#![deprecated(note = "implementation moved to `iter_ok` and `iter_result`")]
+#![allow(deprecated)]
+
+use Poll;
+use stream::{iter_result, IterResult, Stream};
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Iter<I>(IterResult<I>);
+
+/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter simply
+/// always calls `iter.next()` and returns that.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter(vec![Ok(17), Err(false), Ok(19)]);
+/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+/// assert_eq!(Err(false), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+#[inline]
+pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter>
+    where J: IntoIterator<Item=Result<T, E>>,
+{
+    Iter(iter_result(i))
+}
+
+impl<I, T, E> Stream for Iter<I>
+    where I: Iterator<Item=Result<T, E>>,
+{
+    type Item = T;
+    type Error = E;
+
+    #[inline]
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        self.0.poll()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/iter_ok.rs b/rustc_deps/vendor/futures/src/stream/iter_ok.rs
new file mode 100644
index 0000000..9c8d871
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/iter_ok.rs
@@ -0,0 +1,48 @@
+use core::marker;
+
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct IterOk<I, E> {
+    iter: I,
+    _marker: marker::PhantomData<fn() -> E>,
+}
+
+/// Converts an `Iterator` into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter
+/// simply always calls `iter.next()` and returns that.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter_ok::<_, ()>(vec![17, 19]);
+/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+pub fn iter_ok<I, E>(i: I) -> IterOk<I::IntoIter, E>
+    where I: IntoIterator,
+{
+    IterOk {
+        iter: i.into_iter(),
+        _marker: marker::PhantomData,
+    }
+}
+
+impl<I, E> Stream for IterOk<I, E>
+    where I: Iterator,
+{
+    type Item = I::Item;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<I::Item>, E> {
+        Ok(Async::Ready(self.iter.next()))
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/iter_result.rs b/rustc_deps/vendor/futures/src/stream/iter_result.rs
new file mode 100644
index 0000000..4eef5da
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/iter_result.rs
@@ -0,0 +1,51 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct IterResult<I> {
+    iter: I,
+}
+
+/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter simply
+/// always calls `iter.next()` and returns that.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter_result(vec![Ok(17), Err(false), Ok(19)]);
+/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+/// assert_eq!(Err(false), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+pub fn iter_result<J, T, E>(i: J) -> IterResult<J::IntoIter>
+where
+    J: IntoIterator<Item = Result<T, E>>,
+{
+    IterResult {
+        iter: i.into_iter(),
+    }
+}
+
+impl<I, T, E> Stream for IterResult<I>
+where
+    I: Iterator<Item = Result<T, E>>,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        match self.iter.next() {
+            Some(Ok(e)) => Ok(Async::Ready(Some(e))),
+            Some(Err(e)) => Err(e),
+            None => Ok(Async::Ready(None)),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/map.rs b/rustc_deps/vendor/futures/src/stream/map.rs
new file mode 100644
index 0000000..702e980
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/map.rs
@@ -0,0 +1,81 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream combinator which will change the type of a stream from one
+/// type to another.
+///
+/// This is produced by the `Stream::map` method.
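+///
+/// A short sketch of typical use (assuming `iter_ok` and `wait()`):
+///
+/// ```rust
+/// use futures::*;
+///
+/// let stream = stream::iter_ok::<_, ()>(vec![1, 2, 3]).map(|x| x * 2);
+/// assert_eq!(Ok(vec![2, 4, 6]), stream.collect().wait());
+/// ```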
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Map<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> Map<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> U,
+{
+    Map {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F> Map<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for Map<S, F>
+    where S: ::sink::Sink
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F, U> Stream for Map<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> U,
+{
+    type Item = U;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<U>, S::Error> {
+        let option = try_ready!(self.stream.poll());
+        Ok(Async::Ready(option.map(&mut self.f)))
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/map_err.rs b/rustc_deps/vendor/futures/src/stream/map_err.rs
new file mode 100644
index 0000000..8d1c0fc
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/map_err.rs
@@ -0,0 +1,80 @@
+use Poll;
+use stream::Stream;
+
+/// A stream combinator which will change the error type of a stream from one
+/// type to another.
+///
+/// This is produced by the `Stream::map_err` method.
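+///
+/// A short sketch of typical use (assuming `iter_result` and direct `poll`
+/// calls):
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter_result(vec![Ok(1), Err(7)]).map_err(|e| e + 1);
+/// assert_eq!(Ok(Async::Ready(Some(1))), stream.poll());
+/// assert_eq!(Err(8), stream.poll());
+/// ```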
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct MapErr<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> MapErr<S, F>
+    where S: Stream,
+          F: FnMut(S::Error) -> U,
+{
+    MapErr {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F> MapErr<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for MapErr<S, F>
+    where S: ::sink::Sink
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F, U> Stream for MapErr<S, F>
+    where S: Stream,
+          F: FnMut(S::Error) -> U,
+{
+    type Item = S::Item;
+    type Error = U;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, U> {
+        self.stream.poll().map_err(&mut self.f)
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/merge.rs b/rustc_deps/vendor/futures/src/stream/merge.rs
new file mode 100644
index 0000000..af7505e
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/merge.rs
@@ -0,0 +1,82 @@
+#![deprecated(note = "functionality provided by `select` now")]
+#![allow(deprecated)]
+
+use {Poll, Async};
+use stream::{Stream, Fuse};
+
+/// An adapter for merging the output of two streams.
+///
+/// The merged stream produces items from one or both of the underlying
+/// streams as they become available. Errors, however, are not merged: you
+/// get at most one error at a time.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Merge<S1, S2: Stream> {
+    stream1: Fuse<S1>,
+    stream2: Fuse<S2>,
+    queued_error: Option<S2::Error>,
+}
+
+pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Merge<S1, S2>
+    where S1: Stream, S2: Stream<Error = S1::Error>
+{
+    Merge {
+        stream1: stream1.fuse(),
+        stream2: stream2.fuse(),
+        queued_error: None,
+    }
+}
+
+/// An item returned from a merge stream, which represents an item from one or
+/// both of the underlying streams.
+#[derive(Debug)]
+pub enum MergedItem<I1, I2> {
+    /// An item from the first stream
+    First(I1),
+    /// An item from the second stream
+    Second(I2),
+    /// Items from both streams
+    Both(I1, I2),
+}
+
+impl<S1, S2> Stream for Merge<S1, S2>
+    where S1: Stream, S2: Stream<Error = S1::Error>
+{
+    type Item = MergedItem<S1::Item, S2::Item>;
+    type Error = S1::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        if let Some(e) = self.queued_error.take() {
+            return Err(e)
+        }
+
+        match self.stream1.poll()? {
+            Async::NotReady => {
+                match try_ready!(self.stream2.poll()) {
+                    Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))),
+                    None => Ok(Async::NotReady),
+                }
+            }
+            Async::Ready(None) => {
+                match try_ready!(self.stream2.poll()) {
+                    Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))),
+                    None => Ok(Async::Ready(None)),
+                }
+            }
+            Async::Ready(Some(item1)) => {
+                match self.stream2.poll() {
+                    Err(e) => {
+                        self.queued_error = Some(e);
+                        Ok(Async::Ready(Some(MergedItem::First(item1))))
+                    }
+                    Ok(Async::NotReady) | Ok(Async::Ready(None)) => {
+                        Ok(Async::Ready(Some(MergedItem::First(item1))))
+                    }
+                    Ok(Async::Ready(Some(item2))) => {
+                        Ok(Async::Ready(Some(MergedItem::Both(item1, item2))))
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/mod.rs b/rustc_deps/vendor/futures/src/stream/mod.rs
new file mode 100644
index 0000000..b674aef
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/mod.rs
@@ -0,0 +1,1145 @@
+//! Asynchronous streams
+//!
+//! This module contains the `Stream` trait and a number of adaptors for this
+//! trait. This trait is very similar to the `Iterator` trait in the standard
+//! library except that it expresses the concept of blocking as well. A stream
+//! here is a sequence of values, and some amount of time may pass between the
+//! production of one value and the next.
+//!
+//! A stream may request that it is blocked between values while the next value
+//! is calculated, and provides a way to get notified once the next value is
+//! ready as well.
+//!
+//! You can find more information/tutorials about streams [online at
+//! https://tokio.rs][online]
+//!
+//! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
+
+use {IntoFuture, Poll};
+
+mod iter;
+#[allow(deprecated)]
+pub use self::iter::{iter, Iter};
+#[cfg(feature = "with-deprecated")]
+#[allow(deprecated)]
+pub use self::Iter as IterStream;
+mod iter_ok;
+pub use self::iter_ok::{iter_ok, IterOk};
+mod iter_result;
+pub use self::iter_result::{iter_result, IterResult};
+
+mod repeat;
+pub use self::repeat::{repeat, Repeat};
+
+mod and_then;
+mod chain;
+mod concat;
+mod empty;
+mod filter;
+mod filter_map;
+mod flatten;
+mod fold;
+mod for_each;
+mod from_err;
+mod fuse;
+mod future;
+mod inspect;
+mod inspect_err;
+mod map;
+mod map_err;
+mod merge;
+mod once;
+mod or_else;
+mod peek;
+mod poll_fn;
+mod select;
+mod skip;
+mod skip_while;
+mod take;
+mod take_while;
+mod then;
+mod unfold;
+mod zip;
+mod forward;
+pub use self::and_then::AndThen;
+pub use self::chain::Chain;
+#[allow(deprecated)]
+pub use self::concat::Concat;
+pub use self::concat::Concat2;
+pub use self::empty::{Empty, empty};
+pub use self::filter::Filter;
+pub use self::filter_map::FilterMap;
+pub use self::flatten::Flatten;
+pub use self::fold::Fold;
+pub use self::for_each::ForEach;
+pub use self::from_err::FromErr;
+pub use self::fuse::Fuse;
+pub use self::future::StreamFuture;
+pub use self::inspect::Inspect;
+pub use self::inspect_err::InspectErr;
+pub use self::map::Map;
+pub use self::map_err::MapErr;
+#[allow(deprecated)]
+pub use self::merge::{Merge, MergedItem};
+pub use self::once::{Once, once};
+pub use self::or_else::OrElse;
+pub use self::peek::Peekable;
+pub use self::poll_fn::{poll_fn, PollFn};
+pub use self::select::Select;
+pub use self::skip::Skip;
+pub use self::skip_while::SkipWhile;
+pub use self::take::Take;
+pub use self::take_while::TakeWhile;
+pub use self::then::Then;
+pub use self::unfold::{Unfold, unfold};
+pub use self::zip::Zip;
+pub use self::forward::Forward;
+use sink::{Sink};
+
+if_std! {
+    use std;
+
+    mod buffered;
+    mod buffer_unordered;
+    mod catch_unwind;
+    mod chunks;
+    mod collect;
+    mod wait;
+    mod channel;
+    mod split;
+    pub mod futures_unordered;
+    mod futures_ordered;
+    pub use self::buffered::Buffered;
+    pub use self::buffer_unordered::BufferUnordered;
+    pub use self::catch_unwind::CatchUnwind;
+    pub use self::chunks::Chunks;
+    pub use self::collect::Collect;
+    pub use self::wait::Wait;
+    pub use self::split::{SplitStream, SplitSink, ReuniteError};
+    pub use self::futures_unordered::FuturesUnordered;
+    pub use self::futures_ordered::{futures_ordered, FuturesOrdered};
+
+    #[doc(hidden)]
+    #[cfg(feature = "with-deprecated")]
+    #[allow(deprecated)]
+    pub use self::channel::{channel, Sender, Receiver, FutureSender, SendError};
+
+    /// A type alias for `Box<Stream + Send>`
+    #[doc(hidden)]
+    #[deprecated(note = "removed without replacement, recommended to use a \
+                         local extension trait or function if needed, more \
+                         details in https://github.com/rust-lang-nursery/futures-rs/issues/228")]
+    pub type BoxStream<T, E> = ::std::boxed::Box<Stream<Item = T, Error = E> + Send>;
+
+    impl<S: ?Sized + Stream> Stream for ::std::boxed::Box<S> {
+        type Item = S::Item;
+        type Error = S::Error;
+
+        fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+            (**self).poll()
+        }
+    }
+}
+
+/// A stream of values, not all of which may have been produced yet.
+///
+/// `Stream` is a trait to represent any source of sequential events or items
+/// which acts like an iterator but long periods of time may pass between
+/// items. Like `Future` the methods of `Stream` never block and it is thus
+/// suitable for programming in an asynchronous fashion. This trait is very
+/// similar to the `Iterator` trait in the standard library where `Some` is
+/// used to signal elements of the stream and `None` is used to indicate that
+/// the stream is finished.
+///
+/// Like futures a stream has basic combinators to transform the stream, perform
+/// more work on each item, etc.
+///
+/// You can find more information/tutorials about streams [online at
+/// https://tokio.rs][online]
+///
+/// [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
+///
+/// # Streams as Futures
+///
+/// Any instance of `Stream` can also be viewed as a `Future` where the resolved
+/// value is the next item in the stream along with the rest of the stream. The
+/// `into_future` adaptor can be used here to convert any stream into a future
+/// for use with other future methods like `join` and `select`.
+///
+/// # Errors
+///
+/// Streams, like futures, can also model errors in their computation. All
+/// streams have an associated `Error` type like with futures. Currently as of
+/// the 0.1 release of this library an error on a stream **does not terminate
+/// the stream**. That is, after one error is received, another error may be
+/// received from the same stream (it's valid to keep polling).
+///
+/// This property of streams, however, is [being considered] for change in 0.2
+/// where an error on a stream is similar to `None`, it terminates the stream
+/// entirely. If one of these use cases suits you perfectly and not the other,
+/// please feel welcome to comment on [the issue][being considered]!
+///
+/// [being considered]: https://github.com/rust-lang-nursery/futures-rs/issues/206
+pub trait Stream {
+    /// The type of item this stream will yield on success.
+    type Item;
+
+    /// The type of error this stream may generate.
+    type Error;
+
+    /// Attempt to pull out the next value of this stream, returning `None` if
+    /// the stream is finished.
+    ///
+    /// This method, like `Future::poll`, is the sole method of pulling out a
+    /// value from a stream. This method must also be run within the context of
+    /// a task typically and implementors of this trait must ensure that
+    /// implementations of this method do not block, as it may cause consumers
+    /// to behave badly.
+    ///
+    /// # Return value
+    ///
+    /// If `NotReady` is returned then this stream's next value is not ready
+    /// yet and implementations will ensure that the current task will be
+    /// notified when the next value may be ready. If `Some` is returned then
+    /// the returned value represents the next value on the stream. `Err`
+    /// indicates an error happened, while `Ok` indicates whether there was a
+    /// new item on the stream or whether the stream has terminated.
+    ///
+    /// # Panics
+    ///
+    /// Once a stream is finished, that is `Ready(None)` has been returned,
+    /// further calls to `poll` may result in a panic or other "bad behavior".
+    /// If this is difficult to guard against then the `fuse` adapter can be
+    /// used to ensure that `poll` always has well-defined semantics.
+    // TODO: more here
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error>;
+
+    // TODO: should there also be a method like `poll` but doesn't return an
+    //       item? basically just says "please make more progress internally"
+    //       seems crucial for buffering to actually make any sense.
+
+    /// Creates an iterator which blocks the current thread until each item of
+    /// this stream is resolved.
+    ///
+    /// This method will consume ownership of this stream, returning an
+    /// implementation of a standard iterator. This iterator will *block the
+    /// current thread* on each call to `next` if the item in the stream isn't
+    /// ready yet.
+    ///
+    /// > **Note:** This method is not appropriate to call on event loops or
+    /// >           similar I/O situations because it will prevent the event
+    /// >           loop from making progress (this blocks the thread). This
+    /// >           method should only be called when it's guaranteed that the
+    /// >           blocking work associated with this stream will be completed
+    /// >           by another thread.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Panics
+    ///
+    /// The returned iterator does not attempt to catch panics. If the `poll`
+    /// function panics, panics will be propagated to the caller of `next`.
+    #[cfg(feature = "use_std")]
+    fn wait(self) -> Wait<Self>
+        where Self: Sized
+    {
+        wait::new(self)
+    }
+
+    /// Convenience function for turning this stream into a trait object.
+    ///
+    /// This simply avoids the need to write `Box::new` and can often help with
+    /// type inference as well by always returning a trait object. Note that
+    /// this method requires the `Send` bound and returns a `BoxStream`, which
+    /// also encodes this. If you'd like to create a `Box<Stream>` without the
+    /// `Send` bound, then the `Box::new` function can be used instead.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (_tx, rx) = mpsc::channel(1);
+    /// let a: BoxStream<i32, ()> = rx.boxed();
+    /// ```
+    #[cfg(feature = "use_std")]
+    #[doc(hidden)]
+    #[deprecated(note = "removed without replacement, recommended to use a \
+                         local extension trait or function if needed, more \
+                         details in https://github.com/rust-lang-nursery/futures-rs/issues/228")]
+    #[allow(deprecated)]
+    fn boxed(self) -> BoxStream<Self::Item, Self::Error>
+        where Self: Sized + Send + 'static,
+    {
+        ::std::boxed::Box::new(self)
+    }
+
+    /// Converts this stream into a `Future`.
+    ///
+    /// A stream can be viewed as a future which will resolve to a pair containing
+    /// the next element of the stream plus the remaining stream. If the stream
+    /// terminates, then the next element is `None` and the remaining stream is
+    /// still passed back, to allow reclamation of its resources.
+    ///
+    /// The returned future can be used to compose streams and futures together by
+    /// placing everything into the "world of futures".
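+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch that resolves the first item and then keeps
+    /// using the remainder of the stream that is handed back:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, ()>(vec![1, 2, 3]);
+    ///
+    /// // Resolve the future into the first item plus the remaining stream.
+    /// let (item, rest) = stream.into_future().wait().ok().unwrap();
+    /// assert_eq!(item, Some(1));
+    ///
+    /// // The rest of the stream is returned and can keep being used.
+    /// assert_eq!(rest.collect().wait(), Ok(vec![2, 3]));
+    /// ```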
+    fn into_future(self) -> StreamFuture<Self>
+        where Self: Sized
+    {
+        future::new(self)
+    }
+
+    /// Converts a stream of type `T` to a stream of type `U`.
+    ///
+    /// The provided closure is executed over all elements of this stream as
+    /// they are made available, and the callback will be executed inline with
+    /// calls to `poll`.
+    ///
+    /// Note that this function consumes the receiving stream and returns a
+    /// wrapped version of it, similar to the existing `map` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (_tx, rx) = mpsc::channel::<i32>(1);
+    /// let rx = rx.map(|x| x + 3);
+    /// ```
+    fn map<U, F>(self, f: F) -> Map<Self, F>
+        where F: FnMut(Self::Item) -> U,
+              Self: Sized
+    {
+        map::new(self, f)
+    }
+
+    /// Converts a stream of error type `T` to a stream of error type `U`.
+    ///
+    /// The provided closure is executed over all errors of this stream as
+    /// they are made available, and the callback will be executed inline with
+    /// calls to `poll`.
+    ///
+    /// Note that this function consumes the receiving stream and returns a
+    /// wrapped version of it, similar to the existing `map_err` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (_tx, rx) = mpsc::channel::<i32>(1);
+    /// let rx = rx.map_err(|()| 3);
+    /// ```
+    fn map_err<U, F>(self, f: F) -> MapErr<Self, F>
+        where F: FnMut(Self::Error) -> U,
+              Self: Sized
+    {
+        map_err::new(self, f)
+    }
+
+    /// Filters the values produced by this stream according to the provided
+    /// predicate.
+    ///
+    /// As values of this stream are made available, the provided predicate will
+    /// be run against them. If the predicate returns `true` then the stream
+    /// will yield the value, but if the predicate returns `false` then the
+    /// value will be discarded and the next value will be produced.
+    ///
+    /// All errors are passed through without filtering in this combinator.
+    ///
+    /// Note that this function consumes the receiving stream and returns a
+    /// wrapped version of it, similar to the existing `filter` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (_tx, rx) = mpsc::channel::<i32>(1);
+    /// let evens = rx.filter(|x| x % 2 == 0);
+    /// ```
+    fn filter<F>(self, f: F) -> Filter<Self, F>
+        where F: FnMut(&Self::Item) -> bool,
+              Self: Sized
+    {
+        filter::new(self, f)
+    }
+
+    /// Filters the values produced by this stream while simultaneously mapping
+    /// them to a different type.
+    ///
+    /// As values of this stream are made available, the provided function will
+    /// be run on them. If the function returns `Some(e)` then the stream will
+    /// yield the value `e`, but if it returns `None` then the next value
+    /// will be produced.
+    ///
+    /// All errors are passed through without filtering in this combinator.
+    ///
+    /// Note that this function consumes the receiving stream and returns a
+    /// wrapped version of it, similar to the existing `filter_map` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (_tx, rx) = mpsc::channel::<i32>(1);
+    /// let evens_plus_one = rx.filter_map(|x| {
+    ///     if x % 2 == 0 {
+    ///         Some(x + 1)
+    ///     } else {
+    ///         None
+    ///     }
+    /// });
+    /// ```
+    fn filter_map<F, B>(self, f: F) -> FilterMap<Self, F>
+        where F: FnMut(Self::Item) -> Option<B>,
+              Self: Sized
+    {
+        filter_map::new(self, f)
+    }
+
+    /// Chain on a computation for when a value is ready, passing the resulting
+    /// item to the provided closure `f`.
+    ///
+    /// This function can be used to ensure a computation runs regardless of
+    /// the next value on the stream. The closure provided will be yielded a
+    /// `Result` once a value is ready, and the returned future will then be run
+    /// to completion to produce the next value on this stream.
+    ///
+    /// The returned value of the closure must implement the `IntoFuture` trait
+    /// and can represent some more work to be done before the composed stream
+    /// is finished. Note that the `Result` type implements the `IntoFuture`
+    /// trait so it is possible to simply alter the `Result` yielded to the
+    /// closure and return it.
+    ///
+    /// Note that this function consumes the receiving stream and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (_tx, rx) = mpsc::channel::<i32>(1);
+    ///
+    /// let rx = rx.then(|result| {
+    ///     match result {
+    ///         Ok(e) => Ok(e + 3),
+    ///         Err(()) => Err(4),
+    ///     }
+    /// });
+    /// ```
+    fn then<F, U>(self, f: F) -> Then<Self, F, U>
+        where F: FnMut(Result<Self::Item, Self::Error>) -> U,
+              U: IntoFuture,
+              Self: Sized
+    {
+        then::new(self, f)
+    }
+
+    /// Chain on a computation for when a value is ready, passing the successful
+    /// results to the provided closure `f`.
+    ///
+    /// This function can be used to run a unit of work when the next successful
+    /// value on a stream is ready. The closure provided will be yielded a value
+    /// when ready, and the returned future will then be run to completion to
+    /// produce the next value on this stream.
+    ///
+    /// Any errors produced by this stream will not be passed to the closure,
+    /// and will be passed through.
+    ///
+    /// The returned value of the closure must implement the `IntoFuture` trait
+    /// and can represent some more work to be done before the composed stream
+    /// is finished. Note that the `Result` type implements the `IntoFuture`
+    /// trait so it is possible to simply alter the `Result` yielded to the
+    /// closure and return it.
+    ///
+    /// Note that this function consumes the receiving stream and returns a
+    /// wrapped version of it.
+    ///
+    /// To process the entire stream and return a single future representing
+    /// success or error, use `for_each` instead.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (_tx, rx) = mpsc::channel::<i32>(1);
+    ///
+    /// let rx = rx.and_then(|result| {
+    ///     if result % 2 == 0 {
+    ///         Ok(result)
+    ///     } else {
+    ///         Err(())
+    ///     }
+    /// });
+    /// ```
+    fn and_then<F, U>(self, f: F) -> AndThen<Self, F, U>
+        where F: FnMut(Self::Item) -> U,
+              U: IntoFuture<Error = Self::Error>,
+              Self: Sized
+    {
+        and_then::new(self, f)
+    }
+
+    /// Chain on a computation for when an error happens, passing the
+    /// erroneous result to the provided closure `f`.
+    ///
+    /// This function can be used to run a unit of work and attempt to recover from
+    /// an error if one happens. The closure provided will be yielded an error
+    /// when one appears, and the returned future will then be run to completion
+    /// to produce the next value on this stream.
+    ///
+    /// Any successful values produced by this stream will not be passed to the
+    /// closure, and will be passed through.
+    ///
+    /// The returned value of the closure must implement the `IntoFuture` trait
+    /// and can represent some more work to be done before the composed stream
+    /// is finished. Note that the `Result` type implements the `IntoFuture`
+    /// trait so it is possible to simply alter the `Result` yielded to the
+    /// closure and return it.
+    ///
+    /// Note that this function consumes the receiving stream and returns a
+    /// wrapped version of it.
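+    ///
+    /// # Examples
+    ///
+    /// A sketch that recovers from errors by mapping them back into items:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_result(vec![Ok(1), Err(2), Ok(3)]);
+    ///
+    /// // Turn every error into a successful item instead.
+    /// let mut recovered = stream.or_else(|err: i32| Ok::<i32, ()>(err * 10)).wait();
+    ///
+    /// assert_eq!(Some(Ok(1)), recovered.next());
+    /// assert_eq!(Some(Ok(20)), recovered.next());
+    /// assert_eq!(Some(Ok(3)), recovered.next());
+    /// assert_eq!(None, recovered.next());
+    /// ```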
+    fn or_else<F, U>(self, f: F) -> OrElse<Self, F, U>
+        where F: FnMut(Self::Error) -> U,
+              U: IntoFuture<Item = Self::Item>,
+              Self: Sized
+    {
+        or_else::new(self, f)
+    }
+
+    /// Collect all of the values of this stream into a vector, returning a
+    /// future representing the result of that computation.
+    ///
+    /// This combinator will collect all successful results of this stream and
+    /// collect them into a `Vec<Self::Item>`. If an error happens then all
+    /// collected elements will be dropped and the error will be returned.
+    ///
+    /// The returned future will be resolved whenever an error happens or when
+    /// the stream returns `Ok(None)`.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::thread;
+    ///
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (mut tx, rx) = mpsc::channel(1);
+    ///
+    /// thread::spawn(|| {
+    ///     for i in (0..5).rev() {
+    ///         tx = tx.send(i + 1).wait().unwrap();
+    ///     }
+    /// });
+    ///
+    /// let mut result = rx.collect();
+    /// assert_eq!(result.wait(), Ok(vec![5, 4, 3, 2, 1]));
+    /// ```
+    #[cfg(feature = "use_std")]
+    fn collect(self) -> Collect<Self>
+        where Self: Sized
+    {
+        collect::new(self)
+    }
+
+    /// Concatenate all results of a stream into a single extendable
+    /// destination, returning a future representing the end result.
+    ///
+    /// This combinator will extend the first item with the contents
+    /// of all the successful results of the stream. If the stream is
+    /// empty, the default value will be returned. If an error occurs,
+    /// all the results will be dropped and the error will be returned.
+    ///
+    /// The name `concat2` is an intermediate measure until the release of
+    /// futures 0.2, at which point it will be renamed back to `concat`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::thread;
+    ///
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (mut tx, rx) = mpsc::channel(1);
+    ///
+    /// thread::spawn(move || {
+    ///     for i in (0..3).rev() {
+    ///         let n = i * 3;
+    ///         tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap();
+    ///     }
+    /// });
+    /// let result = rx.concat2();
+    /// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
+    /// ```
+    fn concat2(self) -> Concat2<Self>
+        where Self: Sized,
+              Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+    {
+        concat::new2(self)
+    }
+
+    /// Concatenate all results of a stream into a single extendable
+    /// destination, returning a future representing the end result.
+    ///
+    /// This combinator will extend the first item with the contents
+    /// of all the successful results of the stream. If an error occurs,
+    /// all the results will be dropped and the error will be returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::thread;
+    ///
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (mut tx, rx) = mpsc::channel(1);
+    ///
+    /// thread::spawn(move || {
+    ///     for i in (0..3).rev() {
+    ///         let n = i * 3;
+    ///         tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap();
+    ///     }
+    /// });
+    /// let result = rx.concat();
+    /// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// It's important to note that this function will panic if the stream
+    /// is empty, which is the reason for its deprecation.
+    #[deprecated(since="0.1.14", note="please use `Stream::concat2` instead")]
+    #[allow(deprecated)]
+    fn concat(self) -> Concat<Self>
+        where Self: Sized,
+              Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+    {
+        concat::new(self)
+    }
+
+    /// Execute an accumulating computation over a stream, collecting all the
+    /// values into one final result.
+    ///
+    /// This combinator will collect all successful results of this stream
+    /// according to the closure provided. The initial state is also provided to
+    /// this method and then is returned again by each execution of the closure.
+    /// Once the entire stream has been exhausted the returned future will
+    /// resolve to this value.
+    ///
+    /// If an error happens then collected state will be dropped and the error
+    /// will be returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    /// use futures::future;
+    ///
+    /// let number_stream = stream::iter_ok::<_, ()>(0..6);
+    /// let sum = number_stream.fold(0, |acc, x| future::ok(acc + x));
+    /// assert_eq!(sum.wait(), Ok(15));
+    /// ```
+    fn fold<F, T, Fut>(self, init: T, f: F) -> Fold<Self, F, Fut, T>
+        where F: FnMut(T, Self::Item) -> Fut,
+              Fut: IntoFuture<Item = T>,
+              Self::Error: From<Fut::Error>,
+              Self: Sized
+    {
+        fold::new(self, f, init)
+    }
+
+    /// Flattens a stream of streams into just one continuous stream.
+    ///
+    /// If this stream's elements are themselves streams then this combinator
+    /// will flatten out the entire stream to one long chain of elements. Any
+    /// errors are passed through without looking at them, but otherwise each
+    /// individual stream will get exhausted before moving on to the next.
+    ///
+    /// ```
+    /// use std::thread;
+    ///
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (tx1, rx1) = mpsc::channel::<i32>(1);
+    /// let (tx2, rx2) = mpsc::channel::<i32>(1);
+    /// let (tx3, rx3) = mpsc::channel(1);
+    ///
+    /// thread::spawn(|| {
+    ///     tx1.send(1).wait().unwrap()
+    ///        .send(2).wait().unwrap();
+    /// });
+    /// thread::spawn(|| {
+    ///     tx2.send(3).wait().unwrap()
+    ///        .send(4).wait().unwrap();
+    /// });
+    /// thread::spawn(|| {
+    ///     tx3.send(rx1).wait().unwrap()
+    ///        .send(rx2).wait().unwrap();
+    /// });
+    ///
+    /// let mut result = rx3.flatten().collect();
+    /// assert_eq!(result.wait(), Ok(vec![1, 2, 3, 4]));
+    /// ```
+    fn flatten(self) -> Flatten<Self>
+        where Self::Item: Stream,
+              <Self::Item as Stream>::Error: From<Self::Error>,
+              Self: Sized
+    {
+        flatten::new(self)
+    }
+
+    /// Skip elements on this stream while the predicate provided resolves to
+    /// `true`.
+    ///
+    /// This function, like `Iterator::skip_while`, will skip elements on the
+    /// stream until the `predicate` resolves to `false`. Once the predicate
+    /// returns `false` for an element, that element and all subsequent
+    /// elements will be returned from the underlying stream.
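+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch with a predicate that is always immediately
+    /// ready:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, ()>(vec![1, 2, 3, 2, 1]);
+    ///
+    /// // Skip leading elements while they are less than 3.
+    /// let skipped = stream.skip_while(|x| Ok(*x < 3)).collect();
+    /// assert_eq!(skipped.wait(), Ok(vec![3, 2, 1]));
+    /// ```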
+    fn skip_while<P, R>(self, pred: P) -> SkipWhile<Self, P, R>
+        where P: FnMut(&Self::Item) -> R,
+              R: IntoFuture<Item=bool, Error=Self::Error>,
+              Self: Sized
+    {
+        skip_while::new(self, pred)
+    }
+
+    /// Take elements from this stream while the predicate provided resolves to
+    /// `true`.
+    ///
+    /// This function, like `Iterator::take_while`, will take elements from the
+    /// stream until the `predicate` resolves to `false`. Once the predicate
+    /// returns `false` for an element, the stream will always report that it
+    /// is done.
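+    ///
+    /// # Examples
+    ///
+    /// A sketch with a predicate that is always immediately ready:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, ()>(vec![1, 2, 3, 2, 1]);
+    ///
+    /// // Take leading elements while they are less than 3.
+    /// let taken = stream.take_while(|x| Ok(*x < 3)).collect();
+    /// assert_eq!(taken.wait(), Ok(vec![1, 2]));
+    /// ```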
+    fn take_while<P, R>(self, pred: P) -> TakeWhile<Self, P, R>
+        where P: FnMut(&Self::Item) -> R,
+              R: IntoFuture<Item=bool, Error=Self::Error>,
+              Self: Sized
+    {
+        take_while::new(self, pred)
+    }
+
+    /// Runs this stream to completion, executing the provided closure for each
+    /// element on the stream.
+    ///
+    /// The closure provided will be called for each item this stream resolves
+    /// to successfully, producing a future. That future will then be executed
+    /// to completion before moving on to the next item.
+    ///
+    /// The returned value is a `Future` where the `Item` type is `()` and
+    /// errors are otherwise threaded through. Any error on the stream or in the
+    /// closure will cause iteration to be halted immediately and the future
+    /// will resolve to that error.
+    ///
+    /// To process each item in the stream and produce another stream instead
+    /// of a single future, use `and_then` instead.
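+    ///
+    /// # Examples
+    ///
+    /// A sketch that prints each item and resolves to `()` once the stream
+    /// is exhausted:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, ()>(vec![1, 2, 3]);
+    ///
+    /// // The closure returns a future (here a plain `Result`) for each item.
+    /// let done = stream.for_each(|item| {
+    ///     println!("got: {}", item);
+    ///     Ok(())
+    /// });
+    /// assert_eq!(done.wait(), Ok(()));
+    /// ```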
+    fn for_each<F, U>(self, f: F) -> ForEach<Self, F, U>
+        where F: FnMut(Self::Item) -> U,
+              U: IntoFuture<Item=(), Error = Self::Error>,
+              Self: Sized
+    {
+        for_each::new(self, f)
+    }
+
+    /// Map this stream's error to any error implementing `From` for
+    /// this stream's `Error`, returning a new stream.
+    ///
+    /// This function does for streams what `try!` does for `Result`,
+    /// by letting the compiler infer the type of the resulting error.
+    /// Just as `map_err` above, this is useful for example to ensure
+    /// that streams have the same error type when used with
+    /// combinators.
+    ///
+    /// Note that this function consumes the receiving stream and returns a
+    /// wrapped version of it.
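+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch widening the error type from `u8` to `u32`:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_result(vec![Ok(1), Err(3u8)]);
+    ///
+    /// // `u32: From<u8>`, so the error type can be widened automatically.
+    /// let mut widened = stream.from_err::<u32>().wait();
+    /// assert_eq!(Some(Ok(1)), widened.next());
+    /// assert_eq!(Some(Err(3u32)), widened.next());
+    /// assert_eq!(None, widened.next());
+    /// ```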
+    fn from_err<E: From<Self::Error>>(self) -> FromErr<Self, E>
+        where Self: Sized,
+    {
+        from_err::new(self)
+    }
+
+    /// Creates a new stream of at most `amt` items of the underlying stream.
+    ///
+    /// Once `amt` items have been yielded from this stream then it will always
+    /// return that the stream is done.
+    ///
+    /// # Errors
+    ///
+    /// Any errors yielded from the underlying stream, before the desired number
+    /// of items is reached, are passed through and do not affect the total number
+    /// of items taken.
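+    ///
+    /// # Examples
+    ///
+    /// A short sketch using an always-ready stream:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, ()>(1..6);
+    ///
+    /// // Only the first three items are yielded.
+    /// assert_eq!(stream.take(3).collect().wait(), Ok(vec![1, 2, 3]));
+    /// ```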
+    fn take(self, amt: u64) -> Take<Self>
+        where Self: Sized
+    {
+        take::new(self, amt)
+    }
+
+    /// Creates a new stream which skips `amt` items of the underlying stream.
+    ///
+    /// Once `amt` items have been skipped from this stream then it will always
+    /// return the remaining items on this stream.
+    ///
+    /// # Errors
+    ///
+    /// All errors yielded from the underlying stream are passed through and do not
+    /// affect the total number of items skipped.
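+    ///
+    /// # Examples
+    ///
+    /// A brief sketch:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, ()>(1..6);
+    ///
+    /// // The first three items are discarded.
+    /// assert_eq!(stream.skip(3).collect().wait(), Ok(vec![4, 5]));
+    /// ```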
+    fn skip(self, amt: u64) -> Skip<Self>
+        where Self: Sized
+    {
+        skip::new(self, amt)
+    }
+
+    /// Fuse a stream such that `poll` will never again be called once it has
+    /// finished.
+    ///
+    /// Currently, once a stream has returned `None` from `poll` any further
+    /// calls could exhibit bad behavior such as blocking forever, panicking,
+    /// or never returning. If it is known that `poll` may be called after the stream has
+    /// already finished, then this method can be used to ensure that it has
+    /// defined semantics.
+    ///
+    /// Once a stream has been `fuse`d and it finishes, then it will forever
+    /// return `None` from `poll`. This, unlike the trait's `poll` method,
+    /// is guaranteed.
+    ///
+    /// Also note that as soon as this stream returns `None` it will be dropped
+    /// to reclaim resources associated with it.
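+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch polling a fused stream past its end:
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let mut stream = stream::iter_ok::<_, ()>(vec![1]).fuse();
+    ///
+    /// assert_eq!(stream.poll(), Ok(Async::Ready(Some(1))));
+    /// assert_eq!(stream.poll(), Ok(Async::Ready(None)));
+    /// // Polling again after completion stays well-defined on a fused stream.
+    /// assert_eq!(stream.poll(), Ok(Async::Ready(None)));
+    /// ```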
+    fn fuse(self) -> Fuse<Self>
+        where Self: Sized
+    {
+        fuse::new(self)
+    }
+
+    /// Borrows a stream, rather than consuming it.
+    ///
+    /// This is useful to allow applying stream adaptors while still retaining
+    /// ownership of the original stream.
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    /// use futures::future;
+    ///
+    /// let mut stream = stream::iter_ok::<_, ()>(1..5);
+    ///
+    /// let sum = stream.by_ref().take(2).fold(0, |a, b| future::ok(a + b)).wait();
+    /// assert_eq!(sum, Ok(3));
+    ///
+    /// // You can use the stream again
+    /// let sum = stream.take(2).fold(0, |a, b| future::ok(a + b)).wait();
+    /// assert_eq!(sum, Ok(7));
+    /// ```
+    fn by_ref(&mut self) -> &mut Self
+        where Self: Sized
+    {
+        self
+    }
+
+    /// Catches unwinding panics while polling the stream.
+    ///
+    /// The caught panic (if any) will be the last element of the resulting stream.
+    ///
+    /// In general, panics within a stream can propagate all the way out to the
+    /// task level. This combinator makes it possible to halt unwinding within
+    /// the stream itself. It's most commonly used within task executors. This
+    /// method should not be used for error handling.
+    ///
+    /// Note that this method requires the `UnwindSafe` bound from the standard
+    /// library. This isn't always applied automatically, and the standard
+    /// library provides an `AssertUnwindSafe` wrapper type to apply it
+    /// after the fact. To assist using this method, the `Stream` trait is also
+    /// implemented for `AssertUnwindSafe<S>` where `S` implements `Stream`.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, bool>(vec![Some(10), None, Some(11)]);
+    /// // panic on second element
+    /// let stream_panicking = stream.map(|o| o.unwrap());
+    /// let mut iter = stream_panicking.catch_unwind().wait();
+    ///
+    /// assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap());
+    /// assert!(iter.next().unwrap().is_err());
+    /// assert!(iter.next().is_none());
+    /// ```
+    #[cfg(feature = "use_std")]
+    fn catch_unwind(self) -> CatchUnwind<Self>
+        where Self: Sized + std::panic::UnwindSafe
+    {
+        catch_unwind::new(self)
+    }
+
+    /// An adaptor for creating a buffered list of pending futures.
+    ///
+    /// If this stream's item can be converted into a future, then this adaptor
+    /// will buffer up to at most `amt` futures and then return results in the
+    /// same order as the underlying stream. No more than `amt` futures will be
+    /// buffered at any point in time, and less than `amt` may also be buffered
+    /// depending on the state of each future.
+    ///
+    /// The returned stream will be a stream of each future's result, with
+    /// errors passed through whenever they occur.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
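+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch buffering futures that are already complete:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::{future, stream};
+    ///
+    /// // A stream whose items are themselves futures.
+    /// let futures = vec![future::ok::<_, ()>(1), future::ok(2), future::ok(3)];
+    /// let stream = stream::iter_ok::<_, ()>(futures);
+    ///
+    /// // Drive up to two futures at a time, preserving the original order.
+    /// let results = stream.buffered(2).collect();
+    /// assert_eq!(results.wait(), Ok(vec![1, 2, 3]));
+    /// ```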
+    #[cfg(feature = "use_std")]
+    fn buffered(self, amt: usize) -> Buffered<Self>
+        where Self::Item: IntoFuture<Error = <Self as Stream>::Error>,
+              Self: Sized
+    {
+        buffered::new(self, amt)
+    }
+
+    /// An adaptor for creating a buffered list of pending futures (unordered).
+    ///
+    /// If this stream's item can be converted into a future, then this adaptor
+    /// will buffer up to `amt` futures and then return results in the order
+    /// in which they complete. No more than `amt` futures will be buffered at
+    /// any point in time, and less than `amt` may also be buffered depending on
+    /// the state of each future.
+    ///
+    /// The returned stream will be a stream of each future's result, with
+    /// errors passed through whenever they occur.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    #[cfg(feature = "use_std")]
+    fn buffer_unordered(self, amt: usize) -> BufferUnordered<Self>
+        where Self::Item: IntoFuture<Error = <Self as Stream>::Error>,
+              Self: Sized
+    {
+        buffer_unordered::new(self, amt)
+    }
+
+    /// An adapter for merging the output of two streams.
+    ///
+    /// The merged stream produces items from one or both of the underlying
+    /// streams as they become available. Errors, however, are not merged: you
+    /// get at most one error at a time.
+    #[deprecated(note = "functionality provided by `select` now")]
+    #[allow(deprecated)]
+    fn merge<S>(self, other: S) -> Merge<Self, S>
+        where S: Stream<Error = Self::Error>,
+              Self: Sized,
+    {
+        merge::new(self, other)
+    }
+
+    /// An adapter for zipping two streams together.
+    ///
+    /// The zipped stream waits for both streams to produce an item, and then
+    /// returns that pair. If an error happens, then that error will be returned
+    /// immediately. If either stream ends then the zipped stream will also end.
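+    ///
+    /// # Examples
+    ///
+    /// A sketch zipping two finite streams of different lengths:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let a = stream::iter_ok::<_, ()>(vec![1, 2, 3]);
+    /// let b = stream::iter_ok::<_, ()>(vec!["one", "two"]);
+    ///
+    /// // The zipped stream ends when the shorter stream ends.
+    /// let pairs = a.zip(b).collect();
+    /// assert_eq!(pairs.wait(), Ok(vec![(1, "one"), (2, "two")]));
+    /// ```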
+    fn zip<S>(self, other: S) -> Zip<Self, S>
+        where S: Stream<Error = Self::Error>,
+              Self: Sized,
+    {
+        zip::new(self, other)
+    }
+
+    /// Adapter for chaining two streams.
+    ///
+    /// The resulting stream emits elements from the first stream, and when the
+    /// first stream reaches the end, emits the elements from the second stream.
+    ///
+    /// ```rust
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream1 = stream::iter_result(vec![Ok(10), Err(false)]);
+    /// let stream2 = stream::iter_result(vec![Err(true), Ok(20)]);
+    /// let mut chain = stream1.chain(stream2).wait();
+    ///
+    /// assert_eq!(Some(Ok(10)), chain.next());
+    /// assert_eq!(Some(Err(false)), chain.next());
+    /// assert_eq!(Some(Err(true)), chain.next());
+    /// assert_eq!(Some(Ok(20)), chain.next());
+    /// assert_eq!(None, chain.next());
+    /// ```
+    fn chain<S>(self, other: S) -> Chain<Self, S>
+        where S: Stream<Item = Self::Item, Error = Self::Error>,
+              Self: Sized
+    {
+        chain::new(self, other)
+    }
+
+    /// Creates a new stream which exposes a `peek` method.
+    ///
+    /// Calling `peek` returns a reference to the next item in the stream.
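+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch peeking at an item before taking it:
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let mut stream = stream::iter_ok::<_, ()>(vec![1, 2]).peekable();
+    ///
+    /// // `peek` returns a reference and leaves the item in place...
+    /// assert_eq!(stream.peek(), Ok(Async::Ready(Some(&1))));
+    /// // ...while `poll` then hands out the owned item.
+    /// assert_eq!(stream.poll(), Ok(Async::Ready(Some(1))));
+    /// ```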
+    fn peekable(self) -> Peekable<Self>
+        where Self: Sized
+    {
+        peek::new(self)
+    }
+
+    /// An adaptor for chunking up items of the stream inside a vector.
+    ///
+    /// This combinator will attempt to pull items from this stream and buffer
+    /// them into a local vector. At most `capacity` items will get buffered
+    /// before they're yielded from the returned stream.
+    ///
+    /// Note that the vectors returned from this stream may not always have
+    /// `capacity` elements. If the underlying stream ended and only a partial
+    /// vector was created, it'll be returned. Additionally if an error happens
+    /// from the underlying stream then the currently buffered items will be
+    /// yielded.
+    ///
+    /// Errors are passed through the stream unbuffered.
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if `capacity` is zero.
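+    ///
+    /// # Examples
+    ///
+    /// A sketch producing full chunks plus a final partial chunk:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, ()>(1..6);
+    ///
+    /// let chunks = stream.chunks(2).collect();
+    /// assert_eq!(chunks.wait(), Ok(vec![vec![1, 2], vec![3, 4], vec![5]]));
+    /// ```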
+    #[cfg(feature = "use_std")]
+    fn chunks(self, capacity: usize) -> Chunks<Self>
+        where Self: Sized
+    {
+        chunks::new(self, capacity)
+    }
+
+    /// Creates a stream that selects the next element from either this stream
+    /// or the provided one, whichever is ready first.
+    ///
+    /// This combinator will attempt to pull items from both streams. Each
+    /// stream will be polled in a round-robin fashion, and whenever a stream is
+    /// ready to yield an item that item is yielded.
+    ///
+    /// The `select` function is similar to `merge` except that it requires both
+    /// streams to have the same item and error types.
+    ///
+    /// Errors are passed through from either stream.
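+    ///
+    /// # Examples
+    ///
+    /// A sketch interleaving two always-ready streams:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let a = stream::iter_ok::<_, ()>(vec![1, 3]);
+    /// let b = stream::iter_ok::<_, ()>(vec![2, 4]);
+    ///
+    /// // Items are pulled from the two streams in round-robin order.
+    /// let merged = a.select(b).collect();
+    /// assert_eq!(merged.wait(), Ok(vec![1, 2, 3, 4]));
+    /// ```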
+    fn select<S>(self, other: S) -> Select<Self, S>
+        where S: Stream<Item = Self::Item, Error = Self::Error>,
+              Self: Sized,
+    {
+        select::new(self, other)
+    }
+
+    /// A future that completes after the given stream has been fully processed
+    /// into the sink, including flushing.
+    ///
+    /// This future will drive the stream to keep producing items until it is
+    /// exhausted, sending each item to the sink. It will complete once the
+    /// stream is exhausted, the sink has fully processed each received item,
+    /// and the sink has been flushed and closed successfully.
+    ///
+    /// Doing `stream.forward(sink)` is roughly equivalent to
+    /// `sink.send_all(stream)`. The returned future will exhaust all items from
+    /// `self`, sending them all to `sink`. Furthermore the `sink` will be
+    /// closed and flushed.
+    ///
+    /// On completion, the pair `(stream, sink)` is returned.
+    fn forward<S>(self, sink: S) -> Forward<Self, S>
+        where S: Sink<SinkItem = Self::Item>,
+              Self::Error: From<S::SinkError>,
+              Self: Sized
+    {
+        forward::new(self, sink)
+    }
+
+    /// Splits this `Stream + Sink` object into separate `Stream` and `Sink`
+    /// objects.
+    ///
+    /// This can be useful when you want to split ownership between tasks, or
+    /// allow direct interaction between the two objects (e.g. via
+    /// `Sink::send_all`).
+    ///
+    /// This method is only available when the `use_std` feature of this
+    /// library is activated, and it is activated by default.
+    #[cfg(feature = "use_std")]
+    fn split(self) -> (SplitSink<Self>, SplitStream<Self>)
+        where Self: super::sink::Sink + Sized
+    {
+        split::split(self)
+    }
+
+    /// Do something with each item of this stream, afterwards passing it on.
+    ///
+    /// This is similar to the `Iterator::inspect` method in the standard
+    /// library where it allows easily inspecting each value as it passes
+    /// through the stream, for example to debug what's going on.
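+    ///
+    /// # Examples
+    ///
+    /// A sketch that logs each item as it passes through:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    ///
+    /// let stream = stream::iter_ok::<_, ()>(vec![1, 2, 3]);
+    ///
+    /// // Each item is printed and then passed along unchanged.
+    /// let inspected = stream.inspect(|x| println!("saw: {}", x)).collect();
+    /// assert_eq!(inspected.wait(), Ok(vec![1, 2, 3]));
+    /// ```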
+    fn inspect<F>(self, f: F) -> Inspect<Self, F>
+        where F: FnMut(&Self::Item),
+              Self: Sized,
+    {
+        inspect::new(self, f)
+    }
+
+    /// Do something with the error of this stream, afterwards passing it on.
+    ///
+    /// This is similar to the `Stream::inspect` method where it allows
+    /// easily inspecting the error as it passes through the stream, for
+    /// example to debug what's going on.
+    fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
+        where F: FnMut(&Self::Error),
+              Self: Sized,
+    {
+        inspect_err::new(self, f)
+    }
+}
+
+impl<'a, S: ?Sized + Stream> Stream for &'a mut S {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        (**self).poll()
+    }
+}
+
+/// Converts a list of futures into a `Stream` of results from the futures.
+///
+/// This function will take a list of futures (e.g. a vector, an iterator,
+/// etc), and return a stream. The stream will yield items as they become
+/// available on the futures internally, in the order that they become
+/// available. This function is similar to `buffer_unordered` in that it may
+/// return items in a different order than in the list specified.
+///
+/// Note that the returned set can also be used to dynamically push more
+/// futures into the set as they become available.
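+///
+/// # Examples
+///
+/// An illustrative sketch with futures that are already complete:
+///
+/// ```
+/// use futures::prelude::*;
+/// use futures::{future, stream};
+///
+/// let set = stream::futures_unordered(vec![future::ok::<u32, ()>(1), future::ok(2)]);
+///
+/// // Completion order is not guaranteed, so sort before comparing.
+/// let mut results = set.collect().wait().unwrap();
+/// results.sort();
+/// assert_eq!(results, vec![1, 2]);
+/// ```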
+#[cfg(feature = "use_std")]
+pub fn futures_unordered<I>(futures: I) -> FuturesUnordered<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+        I::Item: IntoFuture
+{
+    let mut set = FuturesUnordered::new();
+
+    for future in futures {
+        set.push(future.into_future());
+    }
+
+    return set
+}
diff --git a/rustc_deps/vendor/futures/src/stream/once.rs b/rustc_deps/vendor/futures/src/stream/once.rs
new file mode 100644
index 0000000..24fb327
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/once.rs
@@ -0,0 +1,35 @@
+use {Poll, Async};
+use stream::Stream;
+
+/// A stream which emits a single element and then EOF.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Once<T, E>(Option<Result<T, E>>);
+
+/// Creates a stream of a single element.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::once::<(), _>(Err(17));
+/// assert_eq!(Err(17), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+pub fn once<T, E>(item: Result<T, E>) -> Once<T, E> {
+    Once(Some(item))
+}
+
+impl<T, E> Stream for Once<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        match self.0.take() {
+            Some(Ok(e)) => Ok(Async::Ready(Some(e))),
+            Some(Err(e)) => Err(e),
+            None => Ok(Async::Ready(None)),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/or_else.rs b/rustc_deps/vendor/futures/src/stream/or_else.rs
new file mode 100644
index 0000000..2d15fa2
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/or_else.rs
@@ -0,0 +1,80 @@
+use {IntoFuture, Future, Poll, Async};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto errors produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::or_else` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct OrElse<S, F, U>
+    where U: IntoFuture,
+{
+    stream: S,
+    future: Option<U::Future>,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> OrElse<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Error) -> U,
+          U: IntoFuture<Item=S::Item>,
+{
+    OrElse {
+        stream: s,
+        future: None,
+        f: f,
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F, U> ::sink::Sink for OrElse<S, F, U>
+    where S: ::sink::Sink, U: IntoFuture
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F, U> Stream for OrElse<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Error) -> U,
+          U: IntoFuture<Item=S::Item>,
+{
+    type Item = S::Item;
+    type Error = U::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, U::Error> {
+        if self.future.is_none() {
+            let item = match self.stream.poll() {
+                Ok(Async::Ready(e)) => return Ok(Async::Ready(e)),
+                Ok(Async::NotReady) => return Ok(Async::NotReady),
+                Err(e) => e,
+            };
+            self.future = Some((self.f)(item).into_future());
+        }
+        assert!(self.future.is_some());
+        match self.future.as_mut().unwrap().poll() {
+            Ok(Async::Ready(e)) => {
+                self.future = None;
+                Ok(Async::Ready(Some(e)))
+            }
+            Err(e) => {
+                self.future = None;
+                Err(e)
+            }
+            Ok(Async::NotReady) => Ok(Async::NotReady)
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/peek.rs b/rustc_deps/vendor/futures/src/stream/peek.rs
new file mode 100644
index 0000000..96e6576
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/peek.rs
@@ -0,0 +1,74 @@
+use {Async, Poll};
+use stream::{Stream, Fuse};
+
+/// A `Stream` that implements a `peek` method.
+///
+/// The `peek` method can be used to retrieve a reference
+/// to the next `Stream::Item` if available. A subsequent
+/// call to `poll` will return the owned item.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Peekable<S: Stream> {
+    stream: Fuse<S>,
+    peeked: Option<S::Item>,
+}
+
+
+pub fn new<S: Stream>(stream: S) -> Peekable<S> {
+    Peekable {
+        stream: stream.fuse(),
+        peeked: None
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Peekable<S>
+    where S: ::sink::Sink + Stream
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S: Stream> Stream for Peekable<S> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        if let Some(item) = self.peeked.take() {
+            return Ok(Async::Ready(Some(item)))
+        }
+        self.stream.poll()
+    }
+}
+
+
+impl<S: Stream> Peekable<S> {
+    /// Peek retrieves a reference to the next item in the stream.
+    ///
+    /// This method polls the underlying stream, returning a reference to the
+    /// next item if the stream is ready and passing through any errors.
+    pub fn peek(&mut self) -> Poll<Option<&S::Item>, S::Error> {
+        if self.peeked.is_some() {
+            return Ok(Async::Ready(self.peeked.as_ref()))
+        }
+        match try_ready!(self.poll()) {
+            None => Ok(Async::Ready(None)),
+            Some(item) => {
+                self.peeked = Some(item);
+                Ok(Async::Ready(self.peeked.as_ref()))
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/poll_fn.rs b/rustc_deps/vendor/futures/src/stream/poll_fn.rs
new file mode 100644
index 0000000..fbc7df0
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/poll_fn.rs
@@ -0,0 +1,49 @@
+//! Definition of the `PollFn` combinator
+
+use {Stream, Poll};
+
+/// A stream which adapts a function returning `Poll`.
+///
+/// Created by the `poll_fn` function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct PollFn<F> {
+    inner: F,
+}
+
+/// Creates a new stream wrapping around a function returning `Poll`.
+///
+/// Polling the returned stream delegates to the wrapped function.
+///
+/// # Examples
+///
+/// ```
+/// use futures::stream::poll_fn;
+/// use futures::{Async, Poll};
+///
+/// let mut counter = 1usize;
+///
+/// let read_stream = poll_fn(move || -> Poll<Option<String>, std::io::Error> {
+///     if counter == 0 { return Ok(Async::Ready(None)); }
+///     counter -= 1;
+///     Ok(Async::Ready(Some("Hello, World!".to_owned())))
+/// });
+/// ```
+pub fn poll_fn<T, E, F>(f: F) -> PollFn<F>
+where
+    F: FnMut() -> Poll<Option<T>, E>,
+{
+    PollFn { inner: f }
+}
+
+impl<T, E, F> Stream for PollFn<F>
+where
+    F: FnMut() -> Poll<Option<T>, E>,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        (self.inner)()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/repeat.rs b/rustc_deps/vendor/futures/src/stream/repeat.rs
new file mode 100644
index 0000000..e3cb5ff
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/repeat.rs
@@ -0,0 +1,53 @@
+use core::marker;
+
+
+use stream::Stream;
+
+use {Async, Poll};
+
+
+/// Stream that produces the same element repeatedly.
+///
+/// This structure is created by the `stream::repeat` function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Repeat<T, E>
+    where T: Clone
+{
+    item: T,
+    error: marker::PhantomData<E>,
+}
+
+/// Create a stream which produces the same item repeatedly.
+///
+/// The stream never produces an error or EOF. Note that you likely want to
+/// avoid calling `collect` or similar adapters on the returned stream, as
+/// doing so would try to buffer an infinite number of items and eventually
+/// exhaust all available memory.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::repeat::<_, bool>(10);
+/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
+/// ```
+pub fn repeat<T, E>(item: T) -> Repeat<T, E>
+    where T: Clone
+{
+    Repeat {
+        item: item,
+        error: marker::PhantomData,
+    }
+}
+
+impl<T, E> Stream for Repeat<T, E>
+    where T: Clone
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        Ok(Async::Ready(Some(self.item.clone())))
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/select.rs b/rustc_deps/vendor/futures/src/stream/select.rs
new file mode 100644
index 0000000..ae6b66c
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/select.rs
@@ -0,0 +1,64 @@
+use {Poll, Async};
+use stream::{Stream, Fuse};
+
+/// An adapter for merging the output of two streams.
+///
+/// The merged stream produces items from either of the underlying streams as
+/// they become available, and the streams are polled in a round-robin fashion.
+/// Errors, however, are not merged: you get at most one error at a time.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Select<S1, S2> {
+    stream1: Fuse<S1>,
+    stream2: Fuse<S2>,
+    flag: bool,
+}
+
+pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Select<S1, S2>
+    where S1: Stream,
+          S2: Stream<Item = S1::Item, Error = S1::Error>
+{
+    Select {
+        stream1: stream1.fuse(),
+        stream2: stream2.fuse(),
+        flag: false,
+    }
+}
+
+impl<S1, S2> Stream for Select<S1, S2>
+    where S1: Stream,
+          S2: Stream<Item = S1::Item, Error = S1::Error>
+{
+    type Item = S1::Item;
+    type Error = S1::Error;
+
+    fn poll(&mut self) -> Poll<Option<S1::Item>, S1::Error> {
+        let (a, b) = if self.flag {
+            (&mut self.stream2 as &mut Stream<Item=_, Error=_>,
+             &mut self.stream1 as &mut Stream<Item=_, Error=_>)
+        } else {
+            (&mut self.stream1 as &mut Stream<Item=_, Error=_>,
+             &mut self.stream2 as &mut Stream<Item=_, Error=_>)
+        };
+        self.flag = !self.flag;
+
+        let a_done = match a.poll()? {
+            Async::Ready(Some(item)) => return Ok(Some(item).into()),
+            Async::Ready(None) => true,
+            Async::NotReady => false,
+        };
+
+        match b.poll()? {
+            Async::Ready(Some(item)) => {
+                // If the other stream isn't finished yet, give it a chance to
+                // go first next time, since we pulled something off `b`.
+                if !a_done {
+                    self.flag = !self.flag;
+                }
+                Ok(Some(item).into())
+            }
+            Async::Ready(None) if a_done => Ok(None.into()),
+            Async::Ready(None) | Async::NotReady => Ok(Async::NotReady),
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/skip.rs b/rustc_deps/vendor/futures/src/stream/skip.rs
new file mode 100644
index 0000000..a1d7b49
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/skip.rs
@@ -0,0 +1,84 @@
+use {Poll, Async};
+use stream::Stream;
+
+/// A stream combinator which skips a number of elements before continuing.
+///
+/// This structure is produced by the `Stream::skip` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Skip<S> {
+    stream: S,
+    remaining: u64,
+}
+
+pub fn new<S>(s: S, amt: u64) -> Skip<S>
+    where S: Stream,
+{
+    Skip {
+        stream: s,
+        remaining: amt,
+    }
+}
+
+impl<S> Skip<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Skip<S>
+    where S: ::sink::Sink
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S> Stream for Skip<S>
+    where S: Stream,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        while self.remaining > 0 {
+            match try_ready!(self.stream.poll()) {
+                Some(_) => self.remaining -= 1,
+                None => return Ok(Async::Ready(None)),
+            }
+        }
+
+        self.stream.poll()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/skip_while.rs b/rustc_deps/vendor/futures/src/stream/skip_while.rs
new file mode 100644
index 0000000..b571996
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/skip_while.rs
@@ -0,0 +1,113 @@
+use {Async, Poll, IntoFuture, Future};
+use stream::Stream;
+
+/// A stream combinator which skips elements of a stream while a predicate
+/// holds.
+///
+/// This structure is produced by the `Stream::skip_while` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct SkipWhile<S, P, R> where S: Stream, R: IntoFuture {
+    stream: S,
+    pred: P,
+    pending: Option<(R::Future, S::Item)>,
+    done_skipping: bool,
+}
+
+pub fn new<S, P, R>(s: S, p: P) -> SkipWhile<S, P, R>
+    where S: Stream,
+          P: FnMut(&S::Item) -> R,
+          R: IntoFuture<Item=bool, Error=S::Error>,
+{
+    SkipWhile {
+        stream: s,
+        pred: p,
+        pending: None,
+        done_skipping: false,
+    }
+}
+
+impl<S, P, R> SkipWhile<S, P, R> where S: Stream, R: IntoFuture {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, P, R> ::sink::Sink for SkipWhile<S, P, R>
+    where S: ::sink::Sink + Stream, R: IntoFuture
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, P, R> Stream for SkipWhile<S, P, R>
+    where S: Stream,
+          P: FnMut(&S::Item) -> R,
+          R: IntoFuture<Item=bool, Error=S::Error>,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        if self.done_skipping {
+            return self.stream.poll();
+        }
+
+        loop {
+            if self.pending.is_none() {
+                let item = match try_ready!(self.stream.poll()) {
+                    Some(e) => e,
+                    None => return Ok(Async::Ready(None)),
+                };
+                self.pending = Some(((self.pred)(&item).into_future(), item));
+            }
+
+            assert!(self.pending.is_some());
+            match self.pending.as_mut().unwrap().0.poll() {
+                Ok(Async::Ready(true)) => self.pending = None,
+                Ok(Async::Ready(false)) => {
+                    let (_, item) = self.pending.take().unwrap();
+                    self.done_skipping = true;
+                    return Ok(Async::Ready(Some(item)))
+                }
+                Ok(Async::NotReady) => return Ok(Async::NotReady),
+                Err(e) => {
+                    self.pending = None;
+                    return Err(e)
+                }
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/split.rs b/rustc_deps/vendor/futures/src/stream/split.rs
new file mode 100644
index 0000000..ddaa529
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/split.rs
@@ -0,0 +1,105 @@
+use std::any::Any;
+use std::error::Error;
+use std::fmt;
+
+use {StartSend, Sink, Stream, Poll, Async, AsyncSink};
+use sync::BiLock;
+
+/// A `Stream` part of the split pair
+#[derive(Debug)]
+pub struct SplitStream<S>(BiLock<S>);
+
+impl<S> SplitStream<S> {
+    /// Attempts to put the two "halves" of a split `Stream + Sink` back
+    /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
+    /// a matching pair originating from the same call to `Stream::split`.
+    pub fn reunite(self, other: SplitSink<S>) -> Result<S, ReuniteError<S>> {
+        other.reunite(self)
+    }
+}
+
+impl<S: Stream> Stream for SplitStream<S> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        match self.0.poll_lock() {
+            Async::Ready(mut inner) => inner.poll(),
+            Async::NotReady => Ok(Async::NotReady),
+        }
+    }
+}
+
+/// A `Sink` part of the split pair
+#[derive(Debug)]
+pub struct SplitSink<S>(BiLock<S>);
+
+impl<S> SplitSink<S> {
+    /// Attempts to put the two "halves" of a split `Stream + Sink` back
+    /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
+    /// a matching pair originating from the same call to `Stream::split`.
+    pub fn reunite(self, other: SplitStream<S>) -> Result<S, ReuniteError<S>> {
+        self.0.reunite(other.0).map_err(|err| {
+            ReuniteError(SplitSink(err.0), SplitStream(err.1))
+        })
+    }
+}
+
+impl<S: Sink> Sink for SplitSink<S> {
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem)
+        -> StartSend<S::SinkItem, S::SinkError>
+    {
+        match self.0.poll_lock() {
+            Async::Ready(mut inner) => inner.start_send(item),
+            Async::NotReady => Ok(AsyncSink::NotReady(item)),
+        }
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        match self.0.poll_lock() {
+            Async::Ready(mut inner) => inner.poll_complete(),
+            Async::NotReady => Ok(Async::NotReady),
+        }
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        match self.0.poll_lock() {
+            Async::Ready(mut inner) => inner.close(),
+            Async::NotReady => Ok(Async::NotReady),
+        }
+    }
+}
+
+pub fn split<S: Stream + Sink>(s: S) -> (SplitSink<S>, SplitStream<S>) {
+    let (a, b) = BiLock::new(s);
+    let read = SplitStream(a);
+    let write = SplitSink(b);
+    (write, read)
+}
+
+/// Error indicating a `SplitSink<S>` and `SplitStream<S>` were not two halves
+/// of a `Stream + Sink`, and thus could not be `reunite`d.
+pub struct ReuniteError<T>(pub SplitSink<T>, pub SplitStream<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_tuple("ReuniteError")
+            .field(&"...")
+            .finish()
+    }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "tried to reunite a SplitStream and SplitSink that don't form a pair")
+    }
+}
+
+impl<T: Any> Error for ReuniteError<T> {
+    fn description(&self) -> &str {
+        "tried to reunite a SplitStream and SplitSink that don't form a pair"
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/take.rs b/rustc_deps/vendor/futures/src/stream/take.rs
new file mode 100644
index 0000000..0ca6849
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/take.rs
@@ -0,0 +1,86 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream combinator which returns a maximum number of elements.
+///
+/// This structure is produced by the `Stream::take` method.
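+///
+/// # Example
+///
+/// A minimal sketch, using `stream::iter_ok` as an assumed in-memory source:
+///
+/// ```rust
+/// use futures::{Future, Stream};
+/// use futures::stream;
+///
+/// let first_two = stream::iter_ok::<_, ()>(vec![1, 2, 3])
+///     .take(2)
+///     .collect()
+///     .wait()
+///     .unwrap();
+/// assert_eq!(first_two, vec![1, 2]);
+/// ```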
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Take<S> {
+    stream: S,
+    remaining: u64,
+}
+
+pub fn new<S>(s: S, amt: u64) -> Take<S>
+    where S: Stream,
+{
+    Take {
+        stream: s,
+        remaining: amt,
+    }
+}
+
+impl<S> Take<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Take<S>
+    where S: ::sink::Sink + Stream
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S> Stream for Take<S>
+    where S: Stream,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        if self.remaining == 0 {
+            Ok(Async::Ready(None))
+        } else {
+            let next = try_ready!(self.stream.poll());
+            match next {
+                Some(_) => self.remaining -= 1,
+                None => self.remaining = 0,
+            }
+            Ok(Async::Ready(next))
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/take_while.rs b/rustc_deps/vendor/futures/src/stream/take_while.rs
new file mode 100644
index 0000000..732ae85
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/take_while.rs
@@ -0,0 +1,113 @@
+use {Async, Poll, IntoFuture, Future};
+use stream::Stream;
+
+/// A stream combinator which takes elements from a stream while a predicate
+/// holds.
+///
+/// This structure is produced by the `Stream::take_while` method.
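+///
+/// # Example
+///
+/// A minimal sketch, using `stream::iter_ok` as an assumed in-memory source
+/// and a synchronous `Result` as the predicate's future:
+///
+/// ```rust
+/// use futures::{Future, Stream};
+/// use futures::stream;
+///
+/// let taken = stream::iter_ok::<_, ()>(vec![1, 2, 3, 1])
+///     // Yield items while the predicate resolves to `true`, then stop.
+///     .take_while(|x| Ok(*x < 3))
+///     .collect()
+///     .wait()
+///     .unwrap();
+/// assert_eq!(taken, vec![1, 2]);
+/// ```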
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct TakeWhile<S, P, R> where S: Stream, R: IntoFuture {
+    stream: S,
+    pred: P,
+    pending: Option<(R::Future, S::Item)>,
+    done_taking: bool,
+}
+
+pub fn new<S, P, R>(s: S, p: P) -> TakeWhile<S, P, R>
+    where S: Stream,
+          P: FnMut(&S::Item) -> R,
+          R: IntoFuture<Item=bool, Error=S::Error>,
+{
+    TakeWhile {
+        stream: s,
+        pred: p,
+        pending: None,
+        done_taking: false,
+    }
+}
+
+impl<S, P, R> TakeWhile<S, P, R> where S: Stream, R: IntoFuture {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, P, R> ::sink::Sink for TakeWhile<S, P, R>
+    where S: ::sink::Sink + Stream, R: IntoFuture
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, P, R> Stream for TakeWhile<S, P, R>
+    where S: Stream,
+          P: FnMut(&S::Item) -> R,
+          R: IntoFuture<Item=bool, Error=S::Error>,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        if self.done_taking {
+            return Ok(Async::Ready(None));
+        }
+
+        if self.pending.is_none() {
+            let item = match try_ready!(self.stream.poll()) {
+                Some(e) => e,
+                None => return Ok(Async::Ready(None)),
+            };
+            self.pending = Some(((self.pred)(&item).into_future(), item));
+        }
+
+        assert!(self.pending.is_some());
+        match self.pending.as_mut().unwrap().0.poll() {
+            Ok(Async::Ready(true)) => {
+                let (_, item) = self.pending.take().unwrap();
+                Ok(Async::Ready(Some(item)))
+            },
+            Ok(Async::Ready(false)) => {
+                self.done_taking = true;
+                Ok(Async::Ready(None))
+            }
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            Err(e) => {
+                self.pending = None;
+                Err(e)
+            }
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/then.rs b/rustc_deps/vendor/futures/src/stream/then.rs
new file mode 100644
index 0000000..cab338e
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/then.rs
@@ -0,0 +1,81 @@
+use {Async, IntoFuture, Future, Poll};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto each item produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::then` method.
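+///
+/// # Example
+///
+/// A minimal sketch, using `stream::iter_ok` as an assumed in-memory source;
+/// the closure receives each item as a `Result` and returns a value that
+/// converts into a future (here a plain `Result`):
+///
+/// ```rust
+/// use futures::{Future, Stream};
+/// use futures::stream;
+///
+/// let incremented = stream::iter_ok::<_, ()>(vec![1, 2, 3])
+///     .then(|res| res.map(|x| x + 1))
+///     .collect()
+///     .wait()
+///     .unwrap();
+/// assert_eq!(incremented, vec![2, 3, 4]);
+/// ```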
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Then<S, F, U>
+    where U: IntoFuture,
+{
+    stream: S,
+    future: Option<U::Future>,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> Then<S, F, U>
+    where S: Stream,
+          F: FnMut(Result<S::Item, S::Error>) -> U,
+          U: IntoFuture,
+{
+    Then {
+        stream: s,
+        future: None,
+        f: f,
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F, U> ::sink::Sink for Then<S, F, U>
+    where S: ::sink::Sink, U: IntoFuture,
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F, U> Stream for Then<S, F, U>
+    where S: Stream,
+          F: FnMut(Result<S::Item, S::Error>) -> U,
+          U: IntoFuture,
+{
+    type Item = U::Item;
+    type Error = U::Error;
+
+    fn poll(&mut self) -> Poll<Option<U::Item>, U::Error> {
+        if self.future.is_none() {
+            let item = match self.stream.poll() {
+                Ok(Async::NotReady) => return Ok(Async::NotReady),
+                Ok(Async::Ready(None)) => return Ok(Async::Ready(None)),
+                Ok(Async::Ready(Some(e))) => Ok(e),
+                Err(e) => Err(e),
+            };
+            self.future = Some((self.f)(item).into_future());
+        }
+        assert!(self.future.is_some());
+        match self.future.as_mut().unwrap().poll() {
+            Ok(Async::Ready(e)) => {
+                self.future = None;
+                Ok(Async::Ready(Some(e)))
+            }
+            Err(e) => {
+                self.future = None;
+                Err(e)
+            }
+            Ok(Async::NotReady) => Ok(Async::NotReady)
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/unfold.rs b/rustc_deps/vendor/futures/src/stream/unfold.rs
new file mode 100644
index 0000000..ac427b8
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/unfold.rs
@@ -0,0 +1,114 @@
+use core::mem;
+
+use {Future, IntoFuture, Async, Poll};
+use stream::Stream;
+
+/// Creates a `Stream` from a seed and a closure returning a `Future`.
+///
+/// This function is the dual for the `Stream::fold()` adapter: while
+/// `Stream::fold()` reduces a `Stream` to one single value, `unfold()` creates a
+/// `Stream` from a seed value.
+///
+/// `unfold()` will call the provided closure with the provided seed, then wait
+/// for the returned `Future` to complete with `(a, b)`. It will then yield the
+/// value `a`, and use `b` as the next internal state.
+///
+/// If the closure returns `None` instead of `Some(Future)`, then the `unfold()`
+/// will stop producing items and return `Ok(Async::Ready(None))` in future
+/// calls to `poll()`.
+///
+/// In case of error generated by the returned `Future`, the error will be
+/// returned by the `Stream`.  The `Stream` will then yield
+/// `Ok(Async::Ready(None))` in future calls to `poll()`.
+///
+/// This function can typically be used when wanting to go from the "world of
+/// futures" to the "world of streams": the provided closure can build a
+/// `Future` using other library functions working on futures, and `unfold()`
+/// will turn it into a `Stream` by repeating the operation.
+///
+/// # Example
+///
+/// ```rust
+/// use futures::stream::{self, Stream};
+/// use futures::future::{self, Future};
+///
+/// let mut stream = stream::unfold(0, |state| {
+///     if state <= 2 {
+///         let next_state = state + 1;
+///         let yielded = state * 2;
+///         let fut = future::ok::<_, u32>((yielded, next_state));
+///         Some(fut)
+///     } else {
+///         None
+///     }
+/// });
+///
+/// let result = stream.collect().wait();
+/// assert_eq!(result, Ok(vec![0, 2, 4]));
+/// ```
+pub fn unfold<T, F, Fut, It>(init: T, f: F) -> Unfold<T, F, Fut>
+    where F: FnMut(T) -> Option<Fut>,
+          Fut: IntoFuture<Item = (It, T)>,
+{
+    Unfold {
+        f: f,
+        state: State::Ready(init),
+    }
+}
+
+/// A stream which creates futures, polls them, and returns their results.
+///
+/// This stream is returned by the `futures::stream::unfold` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Unfold<T, F, Fut> where Fut: IntoFuture {
+    f: F,
+    state: State<T, Fut::Future>,
+}
+
+impl <T, F, Fut, It> Stream for Unfold<T, F, Fut>
+    where F: FnMut(T) -> Option<Fut>,
+          Fut: IntoFuture<Item = (It, T)>,
+{
+    type Item = It;
+    type Error = Fut::Error;
+
+    fn poll(&mut self) -> Poll<Option<It>, Fut::Error> {
+        loop {
+            match mem::replace(&mut self.state, State::Empty) {
+                // State::Empty may happen if the future returned an error
+                State::Empty => { return Ok(Async::Ready(None)); }
+                State::Ready(state) => {
+                    match (self.f)(state) {
+                        Some(fut) => { self.state = State::Processing(fut.into_future()); }
+                        None => { return Ok(Async::Ready(None)); }
+                    }
+                }
+                State::Processing(mut fut) => {
+                    match fut.poll()? {
+                        Async::Ready((item, next_state)) => {
+                            self.state = State::Ready(next_state);
+                            return Ok(Async::Ready(Some(item)));
+                        }
+                        Async::NotReady => {
+                            self.state = State::Processing(fut);
+                            return Ok(Async::NotReady);
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+enum State<T, F> where F: Future {
+    /// Placeholder state when doing work, or when the returned Future generated an error
+    Empty,
+
+    /// Ready to generate new future; current internal state is the `T`
+    Ready(T),
+
+    /// Working on a future generated previously
+    Processing(F),
+}
diff --git a/rustc_deps/vendor/futures/src/stream/wait.rs b/rustc_deps/vendor/futures/src/stream/wait.rs
new file mode 100644
index 0000000..80acb6c
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/wait.rs
@@ -0,0 +1,53 @@
+use stream::Stream;
+use executor;
+
+/// A stream combinator which converts an asynchronous stream to a **blocking
+/// iterator**.
+///
+/// Created by the `Stream::wait` method, this function transforms any stream
+/// into a standard iterator. This is implemented by blocking the current thread
+/// while items on the underlying stream aren't ready yet.
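+///
+/// # Example
+///
+/// A minimal sketch, using `stream::iter_ok` as an assumed in-memory source;
+/// the resulting iterator yields each item as a `Result`:
+///
+/// ```rust
+/// use futures::Stream;
+/// use futures::stream;
+///
+/// let results: Vec<_> = stream::iter_ok::<_, ()>(vec![1, 2, 3])
+///     .wait()
+///     .collect();
+/// assert_eq!(results, vec![Ok(1), Ok(2), Ok(3)]);
+/// ```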
+#[must_use = "iterators do nothing unless advanced"]
+#[derive(Debug)]
+pub struct Wait<S> {
+    stream: executor::Spawn<S>,
+}
+
+impl<S> Wait<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        self.stream.get_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.stream.get_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream.into_inner()
+    }
+}
+
+pub fn new<S: Stream>(s: S) -> Wait<S> {
+    Wait {
+        stream: executor::spawn(s),
+    }
+}
+
+impl<S: Stream> Iterator for Wait<S> {
+    type Item = Result<S::Item, S::Error>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.stream.wait_stream()
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/stream/zip.rs b/rustc_deps/vendor/futures/src/stream/zip.rs
new file mode 100644
index 0000000..17e3c69
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/stream/zip.rs
@@ -0,0 +1,59 @@
+use {Async, Poll};
+use stream::{Stream, Fuse};
+
+/// An adapter for merging the output of two streams.
+///
+/// The merged stream produces items from one or both of the underlying
+/// streams as they become available. Errors, however, are not merged: you
+/// get at most one error at a time.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Zip<S1: Stream, S2: Stream> {
+    stream1: Fuse<S1>,
+    stream2: Fuse<S2>,
+    queued1: Option<S1::Item>,
+    queued2: Option<S2::Item>,
+}
+
+pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Zip<S1, S2>
+    where S1: Stream, S2: Stream<Error = S1::Error>
+{
+    Zip {
+        stream1: stream1.fuse(),
+        stream2: stream2.fuse(),
+        queued1: None,
+        queued2: None,
+    }
+}
+
+impl<S1, S2> Stream for Zip<S1, S2>
+    where S1: Stream, S2: Stream<Error = S1::Error>
+{
+    type Item = (S1::Item, S2::Item);
+    type Error = S1::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        if self.queued1.is_none() {
+            match self.stream1.poll()? {
+                Async::Ready(Some(item1)) => self.queued1 = Some(item1),
+                Async::Ready(None) | Async::NotReady => {}
+            }
+        }
+        if self.queued2.is_none() {
+            match self.stream2.poll()? {
+                Async::Ready(Some(item2)) => self.queued2 = Some(item2),
+                Async::Ready(None) | Async::NotReady => {}
+            }
+        }
+
+        if self.queued1.is_some() && self.queued2.is_some() {
+            let pair = (self.queued1.take().unwrap(),
+                        self.queued2.take().unwrap());
+            Ok(Async::Ready(Some(pair)))
+        } else if self.stream1.is_done() || self.stream2.is_done() {
+            Ok(Async::Ready(None))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sync/bilock.rs b/rustc_deps/vendor/futures/src/sync/bilock.rs
new file mode 100644
index 0000000..af9e1ee
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sync/bilock.rs
@@ -0,0 +1,298 @@
+use std::any::Any;
+use std::boxed::Box;
+use std::cell::UnsafeCell;
+use std::error::Error;
+use std::fmt;
+use std::mem;
+use std::ops::{Deref, DerefMut};
+use std::sync::Arc;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+
+use {Async, Future, Poll};
+use task::{self, Task};
+
+/// A type of futures-powered synchronization primitive which is a mutex between
+/// two possible owners.
+///
+/// This primitive is not as generic as a full-blown mutex but is sufficient for
+/// many use cases where there are only two possible owners of a resource. The
+/// implementation of `BiLock` can be more optimized for just the two possible
+/// owners.
+///
+/// Note that it's possible to use this lock through a poll-style interface with
+/// the `poll_lock` method, but you can also use it as a future with the `lock`
+/// method that consumes a `BiLock` and returns a future that will resolve when
+/// it's locked.
+///
+/// A `BiLock` is typically used for "split" operations where data which serves
+/// two purposes wants to be split into two to be worked with separately. For
+/// example, a TCP stream could be both a reader and a writer, or a framing layer
+/// could be both a stream and a sink for messages. A `BiLock` enables splitting
+/// these two and then using each independently in a futures-powered fashion.
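+///
+/// # Example
+///
+/// A minimal sketch of locking one half and then reuniting the two halves
+/// (the `u32` value is purely illustrative):
+///
+/// ```rust
+/// use futures::Future;
+/// use futures::sync::BiLock;
+///
+/// let (a, b) = BiLock::new(0u32);
+///
+/// // `lock` consumes the handle and resolves once the lock is acquired.
+/// let acquired = a.lock().wait().unwrap();
+/// assert_eq!(*acquired, 0);
+///
+/// // Unlocking returns the original handle, which can then be reunited
+/// // with its counterpart to recover the protected value.
+/// let a = acquired.unlock();
+/// assert_eq!(a.reunite(b).unwrap(), 0);
+/// ```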
+#[derive(Debug)]
+pub struct BiLock<T> {
+    inner: Arc<Inner<T>>,
+}
+
+#[derive(Debug)]
+struct Inner<T> {
+    state: AtomicUsize,
+    inner: Option<UnsafeCell<T>>,
+}
+
+unsafe impl<T: Send> Send for Inner<T> {}
+unsafe impl<T: Send> Sync for Inner<T> {}
+
+impl<T> BiLock<T> {
+    /// Creates a new `BiLock` protecting the provided data.
+    ///
+    /// Two handles to the lock are returned, and these are the only two handles
+    /// that will ever be available to the lock. These can then be sent to separate
+    /// tasks to be managed there.
+    pub fn new(t: T) -> (BiLock<T>, BiLock<T>) {
+        let inner = Arc::new(Inner {
+            state: AtomicUsize::new(0),
+            inner: Some(UnsafeCell::new(t)),
+        });
+
+        (BiLock { inner: inner.clone() }, BiLock { inner: inner })
+    }
+
+    /// Attempt to acquire this lock, returning `NotReady` if it can't be
+    /// acquired.
+    ///
+    /// This function will acquire the lock in a nonblocking fashion, returning
+    /// immediately if the lock is already held. If the lock is successfully
+    /// acquired then `Async::Ready` is returned with a value that represents
+    /// the locked value (and can be used to access the protected data). The
+    /// lock is unlocked when the returned `BiLockGuard` is dropped.
+    ///
+    /// If the lock is already held then this function will return
+    /// `Async::NotReady`. In this case the current task will also be scheduled
+    /// to receive a notification when the lock would otherwise become
+    /// available.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if called outside the context of a future's
+    /// task.
+    pub fn poll_lock(&self) -> Async<BiLockGuard<T>> {
+        loop {
+            match self.inner.state.swap(1, SeqCst) {
+                // Woohoo, we grabbed the lock!
+                0 => return Async::Ready(BiLockGuard { inner: self }),
+
+                // Oops, someone else has locked the lock
+                1 => {}
+
+                // A task was previously blocked on this lock, likely our task,
+                // so we need to update that task.
+                n => unsafe {
+                    drop(Box::from_raw(n as *mut Task));
+                }
+            }
+
+            let me = Box::new(task::current());
+            let me = Box::into_raw(me) as usize;
+
+            match self.inner.state.compare_exchange(1, me, SeqCst, SeqCst) {
+                // The lock is still locked, but we've now parked ourselves, so
+                // just report that we're scheduled to receive a notification.
+                Ok(_) => return Async::NotReady,
+
+                // Oops, looks like the lock was unlocked after our swap above
+                // and before the compare_exchange. Deallocate what we just
+                // allocated and go through the loop again.
+                Err(0) => unsafe {
+                    drop(Box::from_raw(me as *mut Task));
+                },
+
+                // The top of this loop set the previous state to 1, so if we
+                // failed the CAS above then it's because the previous value was
+                // *not* zero or one. This indicates that a task was blocked,
+                // but we're trying to acquire the lock and there's only one
+                // other reference to the lock, so it should be impossible for
+                // that task to ever block itself.
+                Err(n) => panic!("invalid state: {}", n),
+            }
+        }
+    }
+
+    /// Perform a "blocking lock" of this lock, consuming this lock handle and
+    /// returning a future to the acquired lock.
+    ///
+    /// This function consumes the `BiLock<T>` and returns a sentinel future,
+    /// `BiLockAcquire<T>`. The returned future will resolve to
+    /// `BiLockAcquired<T>` which represents a locked lock similarly to
+    /// `BiLockGuard<T>`.
+    ///
+    /// Note that the returned future will never resolve to an error.
+    pub fn lock(self) -> BiLockAcquire<T> {
+        BiLockAcquire {
+            inner: Some(self),
+        }
+    }
+
+    /// Attempts to put the two "halves" of a `BiLock<T>` back together and
+    /// recover the original value. Succeeds only if the two `BiLock<T>`s
+    /// originated from the same call to `BiLock::new`.
+    pub fn reunite(self, other: Self) -> Result<T, ReuniteError<T>> {
+        if &*self.inner as *const _ == &*other.inner as *const _ {
+            drop(other);
+            let inner = Arc::try_unwrap(self.inner)
+                .ok()
+                .expect("futures: try_unwrap failed in BiLock<T>::reunite");
+            Ok(unsafe { inner.into_inner() })
+        } else {
+            Err(ReuniteError(self, other))
+        }
+    }
+
+    fn unlock(&self) {
+        match self.inner.state.swap(0, SeqCst) {
+            // We've locked the lock, so it shouldn't be possible for us to see
+            // an unlocked lock.
+            0 => panic!("invalid unlocked state"),
+
+            // Ok, no one else tried to get the lock, we're done.
+            1 => {}
+
+            // Another task has parked itself on this lock, so let's wake it up
+            // as it's now its turn.
+            n => unsafe {
+                Box::from_raw(n as *mut Task).notify();
+            }
+        }
+    }
+}
+
+impl<T> Inner<T> {
+    unsafe fn into_inner(mut self) -> T {
+        mem::replace(&mut self.inner, None).unwrap().into_inner()
+    }
+}
+
+impl<T> Drop for Inner<T> {
+    fn drop(&mut self) {
+        assert_eq!(self.state.load(SeqCst), 0);
+    }
+}
+
+/// Error indicating two `BiLock<T>`s were not two halves of a whole, and
+/// thus could not be `reunite`d.
+pub struct ReuniteError<T>(pub BiLock<T>, pub BiLock<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_tuple("ReuniteError")
+            .field(&"...")
+            .finish()
+    }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "tried to reunite two BiLocks that don't form a pair")
+    }
+}
+
+impl<T: Any> Error for ReuniteError<T> {
+    fn description(&self) -> &str {
+        "tried to reunite two BiLocks that don't form a pair"
+    }
+}
+
+/// Returned RAII guard from the `poll_lock` method.
+///
+/// This structure acts as a sentinel to the data in the `BiLock<T>` itself,
+/// implementing `Deref` and `DerefMut` to `T`. When dropped, the lock will be
+/// unlocked.
+#[derive(Debug)]
+pub struct BiLockGuard<'a, T: 'a> {
+    inner: &'a BiLock<T>,
+}
+
+impl<'a, T> Deref for BiLockGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        unsafe { &*self.inner.inner.inner.as_ref().unwrap().get() }
+    }
+}
+
+impl<'a, T> DerefMut for BiLockGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.inner.inner.inner.as_ref().unwrap().get() }
+    }
+}
+
+impl<'a, T> Drop for BiLockGuard<'a, T> {
+    fn drop(&mut self) {
+        self.inner.unlock();
+    }
+}
+
+/// Future returned by `BiLock::lock` which will resolve when the lock is
+/// acquired.
+#[derive(Debug)]
+pub struct BiLockAcquire<T> {
+    inner: Option<BiLock<T>>,
+}
+
+impl<T> Future for BiLockAcquire<T> {
+    type Item = BiLockAcquired<T>;
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<BiLockAcquired<T>, ()> {
+        match self.inner.as_ref().expect("cannot poll after Ready").poll_lock() {
+            Async::Ready(r) => {
+                mem::forget(r);
+            }
+            Async::NotReady => return Ok(Async::NotReady),
+        }
+        Ok(Async::Ready(BiLockAcquired { inner: self.inner.take() }))
+    }
+}
+
+/// Resolved value of the `BiLockAcquire<T>` future.
+///
+/// This value, like `BiLockGuard<T>`, is a sentinel to the value `T` through
+/// implementations of `Deref` and `DerefMut`. When dropped will unlock the
+/// lock, and the original unlocked `BiLock<T>` can be recovered through the
+/// `unlock` method.
+#[derive(Debug)]
+pub struct BiLockAcquired<T> {
+    inner: Option<BiLock<T>>,
+}
+
+impl<T> BiLockAcquired<T> {
+    /// Recovers the original `BiLock<T>`, unlocking this lock.
+    pub fn unlock(mut self) -> BiLock<T> {
+        let bi_lock = self.inner.take().unwrap();
+
+        bi_lock.unlock();
+
+        bi_lock
+    }
+}
+
+impl<T> Deref for BiLockAcquired<T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        unsafe { &*self.inner.as_ref().unwrap().inner.inner.as_ref().unwrap().get() }
+    }
+}
+
+impl<T> DerefMut for BiLockAcquired<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.inner.as_mut().unwrap().inner.inner.as_ref().unwrap().get() }
+    }
+}
+
+impl<T> Drop for BiLockAcquired<T> {
+    fn drop(&mut self) {
+        if let Some(ref bi_lock) = self.inner {
+            bi_lock.unlock();
+        }
+    }
+}
diff --git a/rustc_deps/vendor/futures/src/sync/mod.rs b/rustc_deps/vendor/futures/src/sync/mod.rs
new file mode 100644
index 0000000..0a46e9a
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sync/mod.rs
@@ -0,0 +1,17 @@
+//! Future-aware synchronization
+//!
+//! This module, which is modeled after `std::sync`, contains user-space
+//! synchronization tools that work with futures, streams and sinks. In
+//! particular, these synchronizers do *not* block physical OS threads, but
+//! instead work at the task level.
+//!
+//! More information and examples of how to use these synchronization primitives
+//! can be found [online at tokio.rs].
+//!
+//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/synchronization/
+
+pub mod oneshot;
+pub mod mpsc;
+mod bilock;
+
+pub use self::bilock::{BiLock, BiLockGuard, BiLockAcquire, BiLockAcquired};
diff --git a/rustc_deps/vendor/futures/src/sync/mpsc/mod.rs b/rustc_deps/vendor/futures/src/sync/mpsc/mod.rs
new file mode 100644
index 0000000..31d2320
--- /dev/null
+++ b/rustc_deps/vendor/futures/src/sync/mpsc/mod.rs
@@ -0,0 +1,1187 @@
+//! A multi-producer, single-consumer, futures-aware, FIFO queue with back pressure.
+//!
+//! A channel can be used as a communication primitive between tasks running on
+//! `futures-rs` executors. Channel creation provides `Receiver` and `Sender`
+//! handles. `Receiver` implements `Stream` and allows a task to read values
+//! out of the channel. If there is no message to read from the channel, the
+//! current task will be notified when a new value is sent. `Sender` implements
+//! the `Sink` trait and allows a task to send messages into the channel. If
+//! the channel is at capacity, then send will be rejected and the task will be
+//! notified when additional capacity is available.
+//!
+//! # Disconnection
+//!
+//! When all `Sender` handles have been dropped, it is no longer possible to
+//! send values into the channel. This is considered the termination event of
+//! the stream. As such, `Receiver::poll` will return `Ok(Ready(None))`.
+//!
+//! If the receiver handle is dropped, then messages can no longer be read out
+//! of the channel. In this case, a `send` will result in an error.
+//!
+//! # Clean Shutdown
+//!
+//! If the `Receiver` is simply dropped, then it is possible for there to be
+//! messages still in the channel that will not be processed. As such, it is
+//! usually desirable to perform a "clean" shutdown. To do this, the receiver
+//! will first call `close`, which will prevent any further messages from being
+//! sent
+//! into the channel. Then, the receiver consumes the channel to completion, at
+//! which point the receiver can be dropped.
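+//!
+//! # Example
+//!
+//! A minimal sketch of the bounded channel (the buffer size and values are
+//! purely illustrative); dropping every `Sender` is the termination event
+//! that lets the receiving side run to completion:
+//!
+//! ```rust
+//! use futures::{Future, Sink, Stream};
+//! use futures::sync::mpsc;
+//!
+//! let (tx, rx) = mpsc::channel::<u32>(8);
+//!
+//! // `Sink::send` consumes the sender and yields it back once the message
+//! // has been accepted by the channel.
+//! let tx = tx.send(1).wait().unwrap();
+//! tx.send(2).wait().unwrap();
+//!
+//! // All senders have now been dropped, so the stream terminates and
+//! // `collect` can resolve.
+//! let received = rx.collect().wait().unwrap();
+//! assert_eq!(received, vec![1, 2]);
+//! ```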
+
+// At the core, the channel uses an atomic FIFO queue for message passing. This
+// queue is used as the primary coordination primitive. In order to enforce
+// capacity limits and handle back pressure, a secondary FIFO queue is used to
+// send parked task handles.
+//
+// The general idea is that the channel is created with a `buffer` size of `n`.
+// The channel capacity is `n + num-senders`. Each sender gets one "guaranteed"
+// slot to hold a message. This allows `Sender` to know for a fact that a send
+// will succeed *before* starting to do the actual work of sending the value.
+// Since most of this work is lock-free, once the work starts, it is impossible
+// to safely revert.
+//
+// If the sender is unable to process a send operation, then the current
+// task is parked and the handle is sent on the parked task queue.
+//
+// Note that the implementation guarantees that the channel capacity will never
+// exceed the configured limit; however, there is no *strict* guarantee that the
+// receiver will wake up a parked task *immediately* when a slot becomes
+// available. However, it will almost always unpark a task when a slot becomes
+// available and it is *guaranteed* that a sender will be unparked when the
+// message that caused the sender to become parked is read out of the channel.
+//
+// The steps for sending a message are roughly:
+//
+// 1) Increment the channel message count
+// 2) If the channel is at capacity, push the task handle onto the wait queue
+// 3) Push the message onto the message queue.
+//
+// The steps for receiving a message are roughly:
+//
+// 1) Pop a message from the message queue
+// 2) Pop a task handle from the wait queue
+// 3) Decrement the channel message count.
+//
+// It's important for the order of operations on lock-free structures to happen
+// in reverse order between the sender and receiver. This makes the message
+// queue the primary coordination structure and establishes the necessary
+// happens-before semantics required for the acquire / release semantics used
+// by the queue structure.
+
+use std::fmt;
+use std::error::Error;
+use std::any::Any;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::{Arc, Mutex};
+use std::thread;
+use std::usize;
+
+use sync::mpsc::queue::{Queue, PopResult};
+use sync::oneshot;
+use task::{self, Task};
+use future::Executor;
+use sink::SendAll;
+use resultstream::{self, Results};
+use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
+
+mod queue;
+
+/// The transmission end of a channel which is used to send values.
+///
+/// This is created by the `channel` method.
+#[derive(Debug)]
+pub struct Sender<T> {
+    // Channel state shared between the sender and receiver.
+    inner: Arc<Inner<T>>,
+
+    // Handle to the task that is blocked on this sender. This handle is sent
+    // to the receiver half in order to be notified when the sender becomes
+    // unblocked.
+    sender_task: Arc<Mutex<SenderTask>>,
+
+    // True if the sender might be blocked. This is an optimization to avoid
+    // having to lock the mutex most of the time.
+    maybe_parked: bool,
+}
+
+/// The transmission end of a channel which is used to send values.
+///
+/// This is created by the `unbounded` method.
+#[derive(Debug)]
+pub struct UnboundedSender<T>(Sender<T>);
+
+trait AssertKinds: Send + Sync + Clone {}
+impl AssertKinds for UnboundedSender<u32> {}
+
+
+/// The receiving end of a channel which implements the `Stream` trait.
+///
+/// This is a concrete implementation of a stream which can be used to represent
+/// a stream of values being computed elsewhere. This is created by the
+/// `channel` method.
+#[derive(Debug)]
+pub struct Receiver<T> {
+    inner: Arc<Inner<T>>,
+}
+
+/// The receiving end of a channel which implements the `Stream` trait.
+///
+/// This is a concrete implementation of a stream which can be used to represent
+/// a stream of values being computed elsewhere. This is created by the
+/// `unbounded` method.
+#[derive(Debug)]
+pub struct UnboundedReceiver<T>(Receiver<T>);
+
+/// Error type for sending, used when the receiving end of a channel is
+/// dropped
+#[derive(Clone, PartialEq, Eq)]
+pub struct SendError<T>(T);
+
+/// Error type returned from `try_send`
+#[derive(Clone, PartialEq, Eq)]
+pub struct TrySendError<T> {
+    kind: TrySendErrorKind<T>,
+}
+
+#[derive(Clone, PartialEq, Eq)]
+enum TrySendErrorKind<T> {
+    Full(T),
+    Disconnected(T),
+}
+
+impl<T> fmt::Debug for SendError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_tuple("SendError")
+            .field(&"...")
+            .finish()
+    }
+}
+
+impl<T> fmt::Display for SendError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "send failed because receiver is gone")
+    }
+}
+
+impl<T: Any> Error for SendError<T>
+{
+    fn description(&self) -> &str {
+        "send failed because receiver is gone"
+    }
+}
+
+impl<T> SendError<T> {
+    /// Returns the message that was attempted to be sent but failed.
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+impl<T> fmt::Debug for TrySendError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_tuple("TrySendError")
+            .field(&"...")
+            .finish()
+    }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        if self.is_full() {
+            write!(fmt, "send failed because channel is full")
+        } else {
+            write!(fmt, "send failed because receiver is gone")
+        }
+    }
+}
+
+impl<T: Any> Error for TrySendError<T> {
+    fn description(&self) -> &str {
+        if self.is_full() {
+            "send failed because channel is full"
+        } else {
+            "send failed because receiver is gone"
+        }
+    }
+}
+
+impl<T> TrySendError<T> {
+    /// Returns true if this error is a result of the channel being full
+    pub fn is_full(&self) -> bool {
+        use self::TrySendErrorKind::*;
+
+        match self.kind {
+            Full(_) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this error is a result of the receiver being dropped
+    pub fn is_disconnected(&self) -> bool {
+        use self::TrySendErrorKind::*;
+
+        match self.kind {
+            Disconnected(_) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns the message that was attempted to be sent but failed.
+    pub fn into_inner(self) -> T {
+        use self::TrySendErrorKind::*;
+
+        match self.kind {
+            Full(v) | Disconnected(v) => v,
+        }
+    }
+}
+
+#[derive(Debug)]
+struct Inner<T> {
+    // Max buffer size of the channel. If `None` then the channel is unbounded.
+    buffer: Option<usize>,
+
+    // Internal channel state. Consists of the number of messages stored in the
+    // channel as well as a flag signalling that the channel is closed.
+    state: AtomicUsize,
+
+    // Atomic, FIFO queue used to send messages to the receiver
+    message_queue: Queue<Option<T>>,
+
+    // Atomic, FIFO queue used to send parked task handles to the receiver.
+    parked_queue: Queue<Arc<Mutex<SenderTask>>>,
+
+    // Number of senders in existence
+    num_senders: AtomicUsize,
+
+    // Handle to the receiver's task.
+    recv_task: Mutex<ReceiverTask>,
+}
+
+// Struct representation of `Inner::state`.
+#[derive(Debug, Clone, Copy)]
+struct State {
+    // `true` when the channel is open
+    is_open: bool,
+
+    // Number of messages in the channel
+    num_messages: usize,
+}
+
+#[derive(Debug)]
+struct ReceiverTask {
+    unparked: bool,
+    task: Option<Task>,
+}
+
+// Returned from Receiver::try_park()
+enum TryPark {
+    Parked,
+    Closed,
+    NotEmpty,
+}
+
+// The `is_open` flag is stored in the left-most bit of `Inner::state`
+const OPEN_MASK: usize = usize::MAX - (usize::MAX >> 1);
+
+// When a new channel is created, it is created in the open state with no
+// pending messages.
+const INIT_STATE: usize = OPEN_MASK;
+
+// The maximum number of messages that a channel can track is `usize::MAX >> 1`
+const MAX_CAPACITY: usize = !(OPEN_MASK);
+
+// The maximum requested buffer size must be less than the maximum capacity of
+// a channel. This is because each sender gets a guaranteed slot.
+const MAX_BUFFER: usize = MAX_CAPACITY >> 1;
+
+// Sent to the consumer to wake up blocked producers
+#[derive(Debug)]
+struct SenderTask {
+    task: Option<Task>,
+    is_parked: bool,
+}
+
+impl SenderTask {
+    fn new() -> Self {
+        SenderTask {
+            task: None,
+            is_parked: false,
+        }
+    }
+
+    fn notify(&mut self) {
+        self.is_parked = false;
+
+        if let Some(task) = self.task.take() {
+            task.notify();
+        }
+    }
+}
+
+/// Creates an in-memory channel implementation of the `Stream` trait with
+/// bounded capacity.
+///
+/// This method creates a concrete implementation of the `Stream` trait which
+/// can be used to send values across threads in a streaming fashion. This
+/// channel is unique in that it implements back pressure to ensure that the
+/// sender never outpaces the receiver. The channel capacity is equal to
+/// `buffer + num-senders`. In other words, each sender gets a guaranteed slot
+/// in the channel capacity, and on top of that there are `buffer` "first come,
+/// first serve" slots available to all senders.
+///
+/// The `Receiver` returned implements the `Stream` trait and has access to any
+/// number of the associated combinators for transforming the result.
+pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
+    // Check that the requested buffer size does not exceed the maximum buffer
+    // size permitted by the system.
+    assert!(buffer < MAX_BUFFER, "requested buffer size too large");
+    channel2(Some(buffer))
+}
+
+/// Creates an in-memory channel implementation of the `Stream` trait with
+/// unbounded capacity.
+///
+/// This method creates a concrete implementation of the `Stream` trait which
+/// can be used to send values across threads in a streaming fashion. A `send`
+/// on this channel will always succeed as long as the receive half has not
+/// been closed. If the receiver falls behind, messages will be buffered
+/// internally.
+///
+/// **Note** that the amount of available system memory is an implicit bound on
+/// the channel. Using an `unbounded` channel can therefore cause the process to
+/// run out of memory; in this case, the process will be aborted.
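+///
+/// # Example
+///
+/// A minimal sketch (the values are purely illustrative):
+///
+/// ```rust
+/// use futures::{Future, Stream};
+/// use futures::sync::mpsc;
+///
+/// let (tx, rx) = mpsc::unbounded::<u32>();
+/// tx.unbounded_send(1).unwrap();
+/// tx.unbounded_send(2).unwrap();
+///
+/// // Dropping the sender terminates the stream.
+/// drop(tx);
+/// assert_eq!(rx.collect().wait().unwrap(), vec![1, 2]);
+/// ```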
+pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
+    let (tx, rx) = channel2(None);
+    (UnboundedSender(tx), UnboundedReceiver(rx))
+}
+
+fn channel2<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
+    let inner = Arc::new(Inner {
+        buffer: buffer,
+        state: AtomicUsize::new(INIT_STATE),
+        message_queue: Queue::new(),
+        parked_queue: Queue::new(),
+        num_senders: AtomicUsize::new(1),
+        recv_task: Mutex::new(ReceiverTask {
+            unparked: false,
+            task: None,
+        }),
+    });
+
+    let tx = Sender {
+        inner: inner.clone(),
+        sender_task: Arc::new(Mutex::new(SenderTask::new())),
+        maybe_parked: false,
+    };
+
+    let rx = Receiver {
+        inner: inner,
+    };
+
+    (tx, rx)
+}
+
+/*
+ *
+ * ===== impl Sender =====
+ *
+ */
+
+impl<T> Sender<T> {
+    /// Attempts to send a message on this `Sender<T>` without blocking.
+    ///
+    /// This function, unlike `start_send`, is safe to call whether it's being
+    /// called on a task or not. Note that this function, however, will *not*
+    /// attempt to block the current task if the message cannot be sent.
+    ///
+    /// It is not recommended to call this function from inside of a future,
+    /// only from an external thread where you've otherwise arranged to be
+    /// notified when the channel is no longer full.
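+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (the buffer size and values are purely illustrative):
+    ///
+    /// ```rust
+    /// use futures::sync::mpsc;
+    ///
+    /// let (mut tx, rx) = mpsc::channel::<u32>(1);
+    /// assert!(tx.try_send(1).is_ok());
+    ///
+    /// // Once the receiver is gone, further sends report disconnection.
+    /// drop(rx);
+    /// assert!(tx.try_send(2).unwrap_err().is_disconnected());
+    /// ```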
+    pub fn try_send(&mut self, msg: T) -> Result<(), TrySendError<T>> {
+        // If the sender is currently blocked, reject the message
+        if !self.poll_unparked(false).is_ready() {
+            return Err(TrySendError {
+                kind: TrySendErrorKind::Full(msg),
+            });
+        }
+
+        // The channel has capacity to accept the message, so send it
+        self.do_send(Some(msg), false)
+            .map_err(|SendError(v)| {
+                TrySendError {
+                    kind: TrySendErrorKind::Disconnected(v),
+                }
+            })
+    }
+
+    // Do the send without failing
+    // None means close
+    fn do_send(&mut self, msg: Option<T>, do_park: bool) -> Result<(), SendError<T>> {
+        // First, increment the number of messages contained by the channel.
+        // This operation will also atomically determine if the sender task
+        // should be parked.
+        //
+        // None is returned in the case that the channel has been closed by the
+        // receiver. This happens when `Receiver::close` is called or the
+        // receiver is dropped.
+        let park_self = match self.inc_num_messages(msg.is_none()) {
+            Some(park_self) => park_self,
+            None => {
+                // The receiver has closed the channel. Only abort if actually
+                // sending a message. It is important that the stream
+                // termination (None) is always sent. This technically means
+                // that it is possible for the queue to contain the following
+                // number of messages:
+                //
+                //     num-senders + buffer + 1
+                //
+                if let Some(msg) = msg {
+                    return Err(SendError(msg));
+                } else {
+                    return Ok(());
+                }
+            }
+        };
+
+        // If the channel has reached capacity, then the sender task needs to
+        // be parked. This will send the task handle on the parked task queue.
+        //
+        // However, when `do_send` is called while dropping the `Sender`,
+        // `task::current()` can't be called safely. In this case, in order to
+        // maintain internal consistency, a blank message is pushed onto the
+        // parked task queue.
+        if park_self {
+            self.park(do_park);
+        }
+
+        self.queue_push_and_signal(msg);
+
+        Ok(())
+    }
+
+    // Do the send without parking current task.
+    //
+    // To be called from unbounded sender.
+    fn do_send_nb(&self, msg: T) -> Result<(), SendError<T>> {
+        match self.inc_num_messages(false) {
+            Some(park_self) => assert!(!park_self),
+            None => return Err(SendError(msg)),
+        };
+
+        self.queue_push_and_signal(Some(msg));
+
+        Ok(())
+    }
+
+    // Push message to the queue and signal to the receiver
+    fn queue_push_and_signal(&self, msg: Option<T>) {
+        // Push the message onto the message queue
+        self.inner.message_queue.push(msg);
+
+        // Signal to the receiver that a message has been enqueued. If the
+        // receiver is parked, this will unpark the task.
+        self.signal();
+    }
+
+    // Increment the number of queued messages. Returns if the sender should
+    // block.
+    fn inc_num_messages(&self, close: bool) -> Option<bool> {
+        let mut curr = self.inner.state.load(SeqCst);
+
+        loop {
+            let mut state = decode_state(curr);
+
+            // The receiver end closed the channel.
+            if !state.is_open {
+                return None;
+            }
+
+            // This is probably never hit; odds are the process will run out of
+            // memory first. It may be worth returning something else in this
+            // case.
+            assert!(state.num_messages < MAX_CAPACITY, "buffer space exhausted; \
+                    sending this message would overflow the state");
+
+            state.num_messages += 1;
+
+            // The channel is closed by all sender handles being dropped.
+            if close {
+                state.is_open = false;
+            }
+
+            let next = encode_state(&state);
+            match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
+                Ok(_) => {
+                    // Block if the current number of pending messages has exceeded
+                    // the configured buffer size
+                    let park_self = match self.inner.buffer {
+                        Some(buffer) => state.num_messages > buffer,
+                        None => false,
+                    };
+
+                    return Some(park_self)
+                }
+                Err(actual) => curr = actual,
+            }
+        }
+    }
+
+    // Signal to the receiver task that a message has been enqueued
+    fn signal(&self) {
+        // TODO
+        // This logic can probably be improved by guarding the lock with an
+        // atomic.
+        //
+        // Do this step first so that the lock is dropped when
+        // `unpark` is called
+        let task = {
+            let mut recv_task = self.inner.recv_task.lock().unwrap();
+
+            // If the receiver has already been unparked, then there is nothing
+            // more to do
+            if recv_task.unparked {
+                return;
+            }
+
+            // Setting this flag enables the receiving end to detect that
+            // an unpark event happened in order to avoid unnecessarily
+            // parking.
+            recv_task.unparked = true;
+            recv_task.task.take()
+        };
+
+        if let Some(task) = task {
+            task.notify();
+        }
+    }
+
+    fn park(&mut self, can_park: bool) {
+        // TODO: clean up internal state if the task::current will fail
+
+        let task = if can_park {
+            Some(task::current())
+        } else {
+            None
+        };
+
+        {
+            let mut sender = self.sender_task.lock().unwrap();
+            sender.task = task;
+            sender.is_parked = true;
+        }
+
+        // Send handle over queue
+        let t = self.sender_task.clone();
+        self.inner.parked_queue.push(t);
+
+        // Check to make sure we weren't closed after we sent our task on the
+        // queue
+        let state = decode_state(self.inner.state.load(SeqCst));
+        self.maybe_parked = state.is_open;
+    }
+
+    /// Polls the channel to determine if there is guaranteed to be capacity to send at least one
+    /// item without waiting.
+    ///
+    /// Returns `Ok(Async::Ready(_))` if there is sufficient capacity, or returns
+    /// `Ok(Async::NotReady)` if the channel is not guaranteed to have capacity. Returns
+    /// `Err(SendError(_))` if the receiver has been dropped.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if called from outside the context of a task or future.
+    pub fn poll_ready(&mut self) -> Poll<(), SendError<()>> {
+        let state = decode_state(self.inner.state.load(SeqCst));
+        if !state.is_open {
+            return Err(SendError(()));
+        }
+
+        Ok(self.poll_unparked(true))
+    }
+
+    /// Returns whether this channel is closed without needing a context.
+    pub fn is_closed(&self) -> bool {
+        !decode_state(self.inner.state.load(SeqCst)).is_open
+    }
+
+    fn poll_unparked(&mut self, do_park: bool) -> Async<()> {
+        // First check the `maybe_parked` variable. This avoids acquiring the
+        // lock in most cases
+        if self.maybe_parked {
+            // Get a lock on the task handle
+            let mut task = self.sender_task.lock().unwrap();
+
+            if !task.is_parked {
+                self.maybe_parked = false;
+                return Async::Ready(())
+            }
+
+            // At this point, an unpark request is pending, so there will be an
+            // unpark sometime in the future. We just need to make sure that
+            // the correct task will be notified.
+            //
+            // Update the task in case the `Sender` has been moved to another
+            // task
+            task.task = if do_park {
+                Some(task::current())
+            } else {
+                None
+            };
+
+            Async::NotReady
+        } else {
+            Async::Ready(())
+        }
+    }
+}
+
+impl<T> Sink for Sender<T> {
+    type SinkItem = T;
+    type SinkError = SendError<T>;
+
+    fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+        // If the sender is currently blocked, reject the message before doing
+        // any work.
+        if !self.poll_unparked(true).is_ready() {
+            return Ok(AsyncSink::NotReady(msg));
+        }
+
+        // The channel has capacity to accept the message, so send it.
+        self.do_send(Some(msg), true)?;
+
+        Ok(AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+        self.poll_ready()
+            // At this point there is no value to return, and a `SendError<T>`
+            // cannot be constructed without one; changing the error type
+            // would break backwards compatibility, so no error is returned.
+            //
+            // That said, there is also no guarantee that a `poll_complete`
+            // returning `Ok` implies the receiver sees the message.
+            .or_else(|_| Ok(().into()))
+    }
+
+    fn close(&mut self) -> Poll<(), SendError<T>> {
+        Ok(Async::Ready(()))
+    }
+}
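+
+// Illustrative usage sketch (assumes the futures 0.1 `Sink::send` combinator
+// and `Stream::wait`): the `Sink` impl above is what gives a bounded `Sender`
+// its backpressure-aware `send` future.
+//
+//     extern crate futures;
+//     use futures::{Future, Sink, Stream};
+//     use futures::sync::mpsc;
+//
+//     fn main() {
+//         let (tx, rx) = mpsc::channel::<&'static str>(0);
+//         let producer = std::thread::spawn(move || {
+//             // `send` consumes the sender and yields it back once the
+//             // message has been handed to the channel.
+//             tx.send("hello").wait().expect("receiver dropped");
+//         });
+//         // `wait` turns the receiver into a blocking iterator.
+//         let received: Vec<_> = rx.wait().map(Result::unwrap).collect();
+//         producer.join().unwrap();
+//         assert_eq!(received, vec!["hello"]);
+//     }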
+
+impl<T> UnboundedSender<T> {
+    /// Returns whether this channel is closed without needing a context.
+    pub fn is_closed(&self) -> bool {
+        self.0.is_closed()
+    }
+
+    /// Sends the provided message along this channel.
+    ///
+    /// This is an unbounded sender, so this function differs from `Sink::send`
+    /// by ensuring the return type reflects that the channel is always ready to
+    /// receive messages.
+    #[deprecated(note = "renamed to `unbounded_send`")]
+    #[doc(hidden)]
+    pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
+        self.unbounded_send(msg)
+    }
+
+    /// Sends the provided message along this channel.
+    ///
+    /// This is an unbounded sender, so this function differs from `Sink::send`
+    /// by ensuring the return type reflects that the channel is always ready to
+    /// receive messages.
+    pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
+        self.0.do_send_nb(msg)
+    }
+}
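+
+// Illustrative usage sketch (assumes the `unbounded` constructor defined in
+// this module): `unbounded_send` never applies backpressure, so it can be
+// called from plain synchronous code outside of a task context.
+//
+//     extern crate futures;
+//     use futures::Stream;
+//     use futures::sync::mpsc;
+//
+//     fn main() {
+//         let (tx, rx) = mpsc::unbounded::<u32>();
+//         for i in 0..3 {
+//             // Only fails if the receiver has been dropped.
+//             tx.unbounded_send(i).expect("receiver is alive");
+//         }
+//         drop(tx); // let the receiving stream terminate
+//         let got: Vec<_> = rx.wait().map(Result::unwrap).collect();
+//         assert_eq!(got, vec![0, 1, 2]);
+//     }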
+
+impl<T> Sink for UnboundedSender<T> {
+    type SinkItem = T;
+    type SinkError = SendError<T>;
+
+    fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+        self.0.start_send(msg)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+        self.0.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), SendError<T>> {
+        Ok(Async::Ready(()))
+    }
+}
+
+impl<'a, T> Sink for &'a UnboundedSender<T> {
+    type SinkItem = T;
+    type SinkError = SendError<T>;
+
+    fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+        self.0.do_send_nb(msg)?;
+        Ok(AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+        Ok(Async::Ready(()))
+    }
+
+    fn close(&mut self) -> Poll<(), SendError<T>> {
+        Ok(Async::Ready(()))
+    }
+}
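+
+// Illustrative usage sketch (assumes the futures 0.1 `Sink::send` combinator):
+// because `Sink` is also implemented for `&UnboundedSender<T>`, a shared
+// reference can be used as a sink without cloning or consuming the sender.
+//
+//     extern crate futures;
+//     use futures::{Future, Sink, Stream};
+//     use futures::sync::mpsc;
+//
+//     fn main() {
+//         let (tx, rx) = mpsc::unbounded::<u32>();
+//         // `send` consumes only the reference, so `tx` stays usable.
+//         (&tx).send(1).wait().unwrap();
+//         (&tx).send(2).wait().unwrap();
+//         drop(tx);
+//         let got: Vec<_> = rx.wait().map(Result::unwrap).collect();
+//         assert_eq!(got, vec![1, 2]);
+//     }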
+
+impl<T> Clone for UnboundedSender<T> {
+    fn clone(&self) -> UnboundedSender<T> {
+        UnboundedSender(self.0.clone())
+    }
+}
+
+impl<T> Clone for Sender<T> {
+    fn clone(&self) -> Sender<T> {
+        // This atomic op isn't actually guarding any memory, and we don't care
+        // about any orderings besides the ordering on this single atomic
+        // variable, so a relaxed ordering would suffice; `SeqCst` is used here
+        // to match the rest of the module.
+        let mut curr = self.inner.num_senders.load(SeqCst);
+
+        loop {
+            // If the maximum number of senders has been reached, then fail
+            if curr == self.inner.max_senders() {
+                panic!("cannot clone `Sender` -- too many outstanding senders");
+            }
+
+            debug_assert!(curr < self.inner.max_senders());
+
+            let next = curr + 1;
+            let actual = self.inner.num_senders.compare_and_swap(curr, next, SeqCst);
+
+            // The ABA problem doesn't matter here. We only care that the
+            // number of senders never exceeds the maximum.
+            if actual == curr {
+                return Sender {
+                    inner: self.inner.clone(),
+                    sender_task: Arc::new(Mutex::new(SenderTask::new())),
+                    maybe_parked: false,
+                };
+            }
+
+            curr = actual;
+        }
+    }
+}
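+
+// Illustrative usage sketch (assumes the bounded `channel` constructor and
+// `std::thread`): cloning a `Sender` gives each producer its own parking
+// state, so several threads can feed one bounded channel.
+//
+//     extern crate futures;
+//     use std::thread;
+//     use futures::{Future, Sink, Stream};
+//     use futures::sync::mpsc;
+//
+//     fn main() {
+//         let (tx, rx) = mpsc::channel::<usize>(4);
+//         let producers: Vec<_> = (0..3)
+//             .map(|i| {
+//                 let tx = tx.clone();
+//                 thread::spawn(move || {
+//                     tx.send(i).wait().expect("receiver dropped");
+//                 })
+//             })
+//             .collect();
+//         drop(tx); // drop the original handle so the stream can end
+//         let mut got: Vec<_> = rx.wait().map(Result::unwrap).collect();
+//         got.sort();
+//         for p in producers { p.join().unwrap(); }
+//         assert_eq!(got, vec![0, 1, 2]);
+//     }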
+
+impl<T> Drop for Sender<T> {
+    fn drop(&mut self) {
+        // Ordering between variables doesn't matter here
+        let prev = self.inner.num_senders.fetch_sub(1, SeqCst);
+
+        if prev == 1 {
+            let _ = self.do_send(None, false);
+        }
+    }
+}
+
+/*
+ *
+ * ===== impl Receiver =====
+ *
+ */
+
+impl<T> Receiver<T> {
+    /// Closes the receiving half
+    ///
+    /// This prevents any further messages from being sent on the channel while
+    /// still enabling the receiver to drain messages that are buffered.
+    pub fn close(&mut self) {
+        let mut curr = self.inner.state.load(SeqCst);
+
+        loop {
+            let mut state = decode_state(curr);
+
+            if !state.is_open {
+                break
+            }
+
+            state.is_open = false;
+
+            let next = encode_state(&state);
+            match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
+                Ok(_) => break,
+                Err(actual) => curr = actual,
+            }
+        }
+
+        // Wake up any threads waiting as they'll see that we've closed the
+        // channel and will continue on their merry way.
+        loop {
+            match unsafe { self.inner.parked_queue.pop() } {
+                PopResult::Data(task) => {
+                    task.lock().unwrap().notify();
+                }
+                PopResult::Empty => break,
+                PopResult::Inconsistent => thread::yield_now(),
+            }
+        }
+    }
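+
+    // Illustrative usage sketch (assumes the bounded `channel` constructor and
+    // the futures 0.1 `Sink`/`Stream` combinators): after `close`, new sends
+    // fail while messages already buffered in the channel can still be
+    // drained.
+    //
+    //     extern crate futures;
+    //     use futures::{Future, Sink, Stream};
+    //     use futures::sync::mpsc;
+    //
+    //     fn main() {
+    //         let (tx, mut rx) = mpsc::channel::<u32>(1);
+    //         // Buffer one message, getting the sender back from `send`.
+    //         let tx = tx.send(1).wait().unwrap();
+    //         rx.close();
+    //         // The channel no longer accepts new messages...
+    //         assert!(tx.send(2).wait().is_err());
+    //         // ...but the message buffered before `close` is still delivered.
+    //         let got: Vec<_> = rx.wait().map(Result::unwrap).collect();
+    //         assert_eq!(got, vec![1]);
+    //     }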
+
+    fn next_message(&mut self) -> Async<Option<T>> {
+        // Pop off a message
+        loop {
+            match unsafe { self.inner.message_queue.pop() } {
+                PopResult::Data(msg) => {
+                    // If there are any parked task handles in the parked queue,
+                    // pop one and unpark it.
+                    self.unpark_one();
+                    // Decrement number of messages
+                    self.dec_num_messages();
+
+                    return Async::Ready(msg);
+                }
+                PopResult::Empty => {
+                    // The queue is empty, return NotReady
+                    return Async::NotReady;
+                }
+                PopResult::Inconsistent => {
+                    // Inconsistent means that there will be a message to pop
+                    // in a short time. This branch can only be reached if
+                    // values are being produced from another thread, so there
+                    // are a few ways that we can deal with this:
+                    //
+                    // 1) Spin
+                    // 2) thread::yield_now()
+                    // 3) task::current().notify() & return NotReady
+                    //
+                    // For now, thread::yield_now() is used, but it would
+                    // probably be better to spin a few times then yield.
+                    thread::yield_now();
+                }
+            }
+        }
+    }
+
+    // Unpark a single task handle if there is one pending in the parked queue
+    fn unpark_one(&mut self) {
+        loop {
+            match unsafe { self.inner.parked_queue.pop() } {
+                PopResult::Data(task) => {
+                    task.lock().unwrap().notify();
+                    return;
+                }
+                PopResult::Empty => {
+                    // Queue empty, no task to wake up.
+                    return;
+                }
+                PopResult::Inconsistent => {
+                    // Same as above
+                    thread::yield_now();
+                }
+            }
+        }
+    }
+
+    // Try to park the receiver task
+    fn try_park(&self) -> TryPark {
+        let curr = self.inner.state.load(SeqCst);
+        let state = decode_state(curr);
+
+        // If the channel is closed, then there is no need to park.
+        if state.is_closed() {
+            return TryPark::Closed;
+        }
+
+        // First, track the task in the `recv_task` slot
+        let mut recv_task = self.inner.recv_task.lock().unwrap();
+
+        if recv_task.unparked {
+            // Consume the `unpark` signal without actually parking
+            recv_task.unparked = false;
+            return TryPark::NotEmpty;
+        }
+
+        recv_task.task = Some(task::current());
+        TryPark::Parked
+    }
+
+    fn dec_num_messages(&self) {
+        let mut curr = self.inner.state.load(SeqCst);
+
+        loop {
+            let mut state = decode_state(curr);
+
+            state.num_messages -= 1;
+
+            let next = encode_state(&state);
+            match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
+                Ok(_) => break,
+                Err(actual) => curr = actual,
+            }
+        }