bump rouille to 3.0.0

This also:

* replaces multipart 0.13.6 with 0.15.4
* replaces rand 0.4.3 with 0.5.5
* replaces buf_redux 0.6.3 with 0.8.1

Change-Id: I109e6dee6c76999775b98d75634c3cc248e15450
diff --git a/rustc_deps/Cargo.lock b/rustc_deps/Cargo.lock
index 5432dbd..8190dee 100644
--- a/rustc_deps/Cargo.lock
+++ b/rustc_deps/Cargo.lock
@@ -181,10 +181,10 @@
 
 [[package]]
 name = "buf_redux"
-version = "0.6.3"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -531,7 +531,7 @@
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ring 0.14.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rouille 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rouille 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rust-crypto 0.2.36",
  "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustls 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -933,16 +933,17 @@
 
 [[package]]
 name = "multipart"
-version = "0.13.6"
+version = "0.15.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "buf_redux 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "buf_redux 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "httparse 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime_guess 1.8.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "safemem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "twoway 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1436,7 +1437,7 @@
 
 [[package]]
 name = "rouille"
-version = "2.2.0"
+version = "3.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1444,9 +1445,9 @@
  "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "deflate 0.7.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "multipart 0.13.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "multipart 0.15.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.84 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.84 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2141,7 +2142,7 @@
 "checksum block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a076c298b9ecdb530ed9d967e74a6027d6a7478924520acddcddc24c1c8ab3ab"
 "checksum brotli-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4445dea95f4c2b41cde57cc9fee236ae4dbae88d8fcbdb4750fc1bb5d86aaecd"
 "checksum brotli2 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0cb036c3eade309815c15ddbacec5b22c4d1f3983a774ab2eac2e3e9ea85568e"
-"checksum buf_redux 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b9279646319ff816b05fb5897883ece50d7d854d12b59992683d4f8a71b0f949"
+"checksum buf_redux 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72f25c67abbf523ff8457771622fb731ac4a2391439de33bc60febcdee1749c9"
 "checksum build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39"
 "checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40"
 "checksum bytecount 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f861d9ce359f56dbcb6e0c2a1cb84e52ad732cadb57b806adeb3c7668caccbd8"
@@ -2218,7 +2219,7 @@
 "checksum mime_guess 1.8.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2d4c0961143b8efdcfa29c3ae63281601b446a4a668165454b6c90f8024954c5"
 "checksum miniz_oxide 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5ad30a47319c16cde58d0314f5d98202a80c9083b5f61178457403dfb14e509c"
 "checksum miniz_oxide_c_api 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "28edaef377517fd9fe3e085c37d892ce7acd1fbeab9239c5a36eec352d8a8b7e"
-"checksum multipart 0.13.6 (registry+https://github.com/rust-lang/crates.io-index)" = "92f54eb45230c3aa20864ccf0c277eeaeadcf5e437e91731db498dbf7fbe0ec6"
+"checksum multipart 0.15.4 (registry+https://github.com/rust-lang/crates.io-index)" = "adba94490a79baf2d6a23eac897157047008272fa3eecb3373ae6377b91eca28"
 "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
 "checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945"
 "checksum nom 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05aec50c70fd288702bcd93284a8444607f3292dbdf2a30de5ea5dcdbe72287b"
@@ -2273,7 +2274,7 @@
 "checksum regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4e47a2ed29da7a9e1960e1639e7a982e6edc6d49be308a3b02daf511504a16d1"
 "checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5"
 "checksum ring 0.14.3 (registry+https://github.com/rust-lang/crates.io-index)" = "be5386a5f59e5f5bcaea38b50ad26c09e3918a0abc0610640b3be5cfd85d6894"
-"checksum rouille 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0845b9c39ba772da769fe2aaa4d81bfd10695a7ea051d0510702260ff4159841"
+"checksum rouille 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "112568052ec17fa26c6c11c40acbb30d3ad244bf3d6da0be181f5e7e42e5004f"
 "checksum rustc-demangle 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "adacaae16d02b6ec37fdc7acfcddf365978de76d1983d3ee22afc260e1ca9619"
 "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
 "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
diff --git a/rustc_deps/Cargo.toml b/rustc_deps/Cargo.toml
index 27e1cb8..63b39e8 100644
--- a/rustc_deps/Cargo.toml
+++ b/rustc_deps/Cargo.toml
@@ -54,7 +54,7 @@
 rand = "0.5"
 ring = "0.14"
 regex = "1.0.6"
-rouille = "2.1.0"
+rouille = "3.0.0"
 rust-crypto = "0.2"
 rustc-serialize = "0.3"
 rustls = "0.15"
diff --git a/rustc_deps/vendor/buf_redux/.cargo-checksum.json b/rustc_deps/vendor/buf_redux/.cargo-checksum.json
index 348359e..2f5761b 100644
--- a/rustc_deps/vendor/buf_redux/.cargo-checksum.json
+++ b/rustc_deps/vendor/buf_redux/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"b77ae9cdf71ef526db74329113b56f572d0b7e9c78f81fa93c181f743b4ed7aa","LICENSE-APACHE":"2a2732053c4521fa64ef2b121bc3b63b3c3bee3f809d3fb9046eef5641079ed6","LICENSE-MIT":"eda5f797d53f7693f085b8ffbfd8eb7dba189d7f4478685502538a4db77d39d4","README.md":"e902c29266c23ba176578c77cc4044d392ca4d2be3f517a1c60e673952af9025","src/benches.rs":"3de0a73c537d9095874864f40480b44a1e34354f2cc5bbfc52135ada41f1dc5a","src/lib.rs":"6d3c37e55722229f3720a8af8479eb4d1763543d9dd35a14fb941af0df45cb38","src/nightly.rs":"de0921c8fdc71ec938df91f37c86a466e3ff526252362b64768750d2c60bcffb","src/raw.rs":"9889df7640a7555181d17114a732b96231a0740b368eb67b936f7943fa2a5a0f","src/std_tests.rs":"1c4bdbc847b576c4431f7422476ad391f90b2044af18389cf58fd3a5b80ff0d6","src/strategy.rs":"b14ae0a0f07fa1326868ad55ba0408e1ebaf0f3160debe76698a7216b7217e4d","src/tests.rs":"a106497f30a4fa40b920905a9b5d51fd0658b4f8081b2a704db87cc7027be123"},"package":"b9279646319ff816b05fb5897883ece50d7d854d12b59992683d4f8a71b0f949"}
\ No newline at end of file
+{"files":{"Cargo.toml":"0b1b3e60ca9825e2e3b29a9c1b2be249bc9bd8f8b0de68da3f7188a4bdc2ec1b","LICENSE-APACHE":"89d0f2bc8a210c661b761be1a37561644394f778f81ba24758596c79b09b8ba6","LICENSE-MIT":"eda5f797d53f7693f085b8ffbfd8eb7dba189d7f4478685502538a4db77d39d4","README.md":"b7a0f7c6e3609ca4690a9d78e2a882d6238f697d673df15db41f28da907657a6","src/benches.rs":"a5cc7a069e23c631434b22c5c659b2d00919f14b5013a17a589c34be48452e4d","src/buffer/mod.rs":"d7a640a1f587069e099fb346be23563629b5f1b340ec53b6f1d78814d0cd497c","src/buffer/slice_deque_buf.rs":"882efcdb2d8a28f2846f27cffc63b4fcd8b5c3874fa1a51a06d7789d36ad504a","src/buffer/std_buf.rs":"8f97ea8d1ac5ade070e62db0bae63cb0d0f8f53baab60ebb1e31fe03096ec322","src/lib.rs":"050bf655946c80b079fd114f3f29812af36ce17e603f519a84ff74c5a62e6706","src/nightly.rs":"873f9356d655378a51d9c0f384a55bec48204f88c0e93f1208580b8f4b4e6f92","src/policy.rs":"d045ca3b65dd97df3ad48671a71d692afc2e8840d2c6ae52ebfa3186745c5d31","src/ringbuf_tests.rs":"fecaf2bcadea3fbcf60aa842ed764353f9b2e36eb51013d075b5fea45d721af6","src/std_tests.rs":"3a19fc0eea88fb7a6f9d5e339214f56b7ea28ea3a13838d54ba4ddedbd236710"},"package":"72f25c67abbf523ff8457771622fb731ac4a2391439de33bc60febcdee1749c9"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/buf_redux/Cargo.toml b/rustc_deps/vendor/buf_redux/Cargo.toml
index c849c68..806c844 100644
--- a/rustc_deps/vendor/buf_redux/Cargo.toml
+++ b/rustc_deps/vendor/buf_redux/Cargo.toml
@@ -12,7 +12,7 @@
 
 [package]
 name = "buf_redux"
-version = "0.6.3"
+version = "0.8.1"
 authors = ["Austin Bonander <austin.bonander@gmail.com>"]
 description = "Drop-in replacements for buffered I/O in `std::io` with extra features."
 documentation = "https://docs.rs/buf_redux"
@@ -21,10 +21,14 @@
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/abonander/buf_redux"
 [dependencies.memchr]
-version = "1.0"
+version = "2.0"
 
 [dependencies.safemem]
 version = "0.2"
 
 [features]
-nightly = []
+default = ["slice-deque"]
+nightly = ["slice-deque/unstable"]
+[target."cfg(any(unix, windows))".dependencies.slice-deque]
+version = "0.1"
+optional = true
diff --git a/rustc_deps/vendor/buf_redux/LICENSE-APACHE b/rustc_deps/vendor/buf_redux/LICENSE-APACHE
index 20b2156..84399b1 100644
--- a/rustc_deps/vendor/buf_redux/LICENSE-APACHE
+++ b/rustc_deps/vendor/buf_redux/LICENSE-APACHE
@@ -1,10 +1,198 @@
-Copyright 2016 Austin Bonander and The Rust Project Developers

+                              Apache License

+                        Version 2.0, January 2004

+                     http://www.apache.org/licenses/

+

+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

+

+1. Definitions.

+

+   "License" shall mean the terms and conditions for use, reproduction,

+   and distribution as defined by Sections 1 through 9 of this document.

+

+   "Licensor" shall mean the copyright owner or entity authorized by

+   the copyright owner that is granting the License.

+

+   "Legal Entity" shall mean the union of the acting entity and all

+   other entities that control, are controlled by, or are under common

+   control with that entity. For the purposes of this definition,

+   "control" means (i) the power, direct or indirect, to cause the

+   direction or management of such entity, whether by contract or

+   otherwise, or (ii) ownership of fifty percent (50%) or more of the

+   outstanding shares, or (iii) beneficial ownership of such entity.

+

+   "You" (or "Your") shall mean an individual or Legal Entity

+   exercising permissions granted by this License.

+

+   "Source" form shall mean the preferred form for making modifications,

+   including but not limited to software source code, documentation

+   source, and configuration files.

+

+   "Object" form shall mean any form resulting from mechanical

+   transformation or translation of a Source form, including but

+   not limited to compiled object code, generated documentation,

+   and conversions to other media types.

+

+   "Work" shall mean the work of authorship, whether in Source or

+   Object form, made available under the License, as indicated by a

+   copyright notice that is included in or attached to the work

+   (an example is provided in the Appendix below).

+

+   "Derivative Works" shall mean any work, whether in Source or Object

+   form, that is based on (or derived from) the Work and for which the

+   editorial revisions, annotations, elaborations, or other modifications

+   represent, as a whole, an original work of authorship. For the purposes

+   of this License, Derivative Works shall not include works that remain

+   separable from, or merely link (or bind by name) to the interfaces of,

+   the Work and Derivative Works thereof.

+

+   "Contribution" shall mean any work of authorship, including

+   the original version of the Work and any modifications or additions

+   to that Work or Derivative Works thereof, that is intentionally

+   submitted to Licensor for inclusion in the Work by the copyright owner

+   or by an individual or Legal Entity authorized to submit on behalf of

+   the copyright owner. For the purposes of this definition, "submitted"

+   means any form of electronic, verbal, or written communication sent

+   to the Licensor or its representatives, including but not limited to

+   communication on electronic mailing lists, source code control systems,

+   and issue tracking systems that are managed by, or on behalf of, the

+   Licensor for the purpose of discussing and improving the Work, but

+   excluding communication that is conspicuously marked or otherwise

+   designated in writing by the copyright owner as "Not a Contribution."

+

+   "Contributor" shall mean Licensor and any individual or Legal Entity

+   on behalf of whom a Contribution has been received by Licensor and

+   subsequently incorporated within the Work.

+

+2. Grant of Copyright License. Subject to the terms and conditions of

+   this License, each Contributor hereby grants to You a perpetual,

+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable

+   copyright license to reproduce, prepare Derivative Works of,

+   publicly display, publicly perform, sublicense, and distribute the

+   Work and such Derivative Works in Source or Object form.

+

+3. Grant of Patent License. Subject to the terms and conditions of

+   this License, each Contributor hereby grants to You a perpetual,

+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable

+   (except as stated in this section) patent license to make, have made,

+   use, offer to sell, sell, import, and otherwise transfer the Work,

+   where such license applies only to those patent claims licensable

+   by such Contributor that are necessarily infringed by their

+   Contribution(s) alone or by combination of their Contribution(s)

+   with the Work to which such Contribution(s) was submitted. If You

+   institute patent litigation against any entity (including a

+   cross-claim or counterclaim in a lawsuit) alleging that the Work

+   or a Contribution incorporated within the Work constitutes direct

+   or contributory patent infringement, then any patent licenses

+   granted to You under this License for that Work shall terminate

+   as of the date such litigation is filed.

+

+4. Redistribution. You may reproduce and distribute copies of the

+   Work or Derivative Works thereof in any medium, with or without

+   modifications, and in Source or Object form, provided that You

+   meet the following conditions:

+

+   (a) You must give any other recipients of the Work or

+       Derivative Works a copy of this License; and

+

+   (b) You must cause any modified files to carry prominent notices

+       stating that You changed the files; and

+

+   (c) You must retain, in the Source form of any Derivative Works

+       that You distribute, all copyright, patent, trademark, and

+       attribution notices from the Source form of the Work,

+       excluding those notices that do not pertain to any part of

+       the Derivative Works; and

+

+   (d) If the Work includes a "NOTICE" text file as part of its

+       distribution, then any Derivative Works that You distribute must

+       include a readable copy of the attribution notices contained

+       within such NOTICE file, excluding those notices that do not

+       pertain to any part of the Derivative Works, in at least one

+       of the following places: within a NOTICE text file distributed

+       as part of the Derivative Works; within the Source form or

+       documentation, if provided along with the Derivative Works; or,

+       within a display generated by the Derivative Works, if and

+       wherever such third-party notices normally appear. The contents

+       of the NOTICE file are for informational purposes only and

+       do not modify the License. You may add Your own attribution

+       notices within Derivative Works that You distribute, alongside

+       or as an addendum to the NOTICE text from the Work, provided

+       that such additional attribution notices cannot be construed

+       as modifying the License.

+

+   You may add Your own copyright statement to Your modifications and

+   may provide additional or different license terms and conditions

+   for use, reproduction, or distribution of Your modifications, or

+   for any such Derivative Works as a whole, provided Your use,

+   reproduction, and distribution of the Work otherwise complies with

+   the conditions stated in this License.

+

+5. Submission of Contributions. Unless You explicitly state otherwise,

+   any Contribution intentionally submitted for inclusion in the Work

+   by You to the Licensor shall be under the terms and conditions of

+   this License, without any additional terms or conditions.

+   Notwithstanding the above, nothing herein shall supersede or modify

+   the terms of any separate license agreement you may have executed

+   with Licensor regarding such Contributions.

+

+6. Trademarks. This License does not grant permission to use the trade

+   names, trademarks, service marks, or product names of the Licensor,

+   except as required for reasonable and customary use in describing the

+   origin of the Work and reproducing the content of the NOTICE file.

+

+7. Disclaimer of Warranty. Unless required by applicable law or

+   agreed to in writing, Licensor provides the Work (and each

+   Contributor provides its Contributions) on an "AS IS" BASIS,

+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or

+   implied, including, without limitation, any warranties or conditions

+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A

+   PARTICULAR PURPOSE. You are solely responsible for determining the

+   appropriateness of using or redistributing the Work and assume any

+   risks associated with Your exercise of permissions under this License.

+

+8. Limitation of Liability. In no event and under no legal theory,

+   whether in tort (including negligence), contract, or otherwise,

+   unless required by applicable law (such as deliberate and grossly

+   negligent acts) or agreed to in writing, shall any Contributor be

+   liable to You for damages, including any direct, indirect, special,

+   incidental, or consequential damages of any character arising as a

+   result of this License or out of the use or inability to use the

+   Work (including but not limited to damages for loss of goodwill,

+   work stoppage, computer failure or malfunction, or any and all

+   other commercial damages or losses), even if such Contributor

+   has been advised of the possibility of such damages.

+

+9. Accepting Warranty or Additional Liability. While redistributing

+   the Work or Derivative Works thereof, You may choose to offer,

+   and charge a fee for, acceptance of support, warranty, indemnity,

+   or other liability obligations and/or rights consistent with this

+   License. However, in accepting such obligations, You may act only

+   on Your own behalf and on Your sole responsibility, not on behalf

+   of any other Contributor, and only if You agree to indemnify,

+   defend, and hold each Contributor harmless for any liability

+   incurred by, or claims asserted against, such Contributor by reason

+   of your accepting any such warranty or additional liability.

+

+END OF TERMS AND CONDITIONS

+

+APPENDIX: How to apply the Apache License to your work.

+

+   To apply the Apache License to your work, attach the following

+   boilerplate notice, with the fields enclosed by brackets "[]"

+   replaced with your own identifying information. (Don't include

+   the brackets!)  The text should be enclosed in the appropriate

+   comment syntax for the file format. We also recommend that a

+   file or class name and description of purpose be included on the

+   same "printed page" as the copyright notice for easier

+   identification within third-party archives.

+

+Copyright 2016-2018 Austin Bonander and The Rust Project Developers

 

 Licensed under the Apache License, Version 2.0 (the "License");

 you may not use this file except in compliance with the License.

 You may obtain a copy of the License at

 

-    http://www.apache.org/licenses/LICENSE-2.0

+	http://www.apache.org/licenses/LICENSE-2.0

 

 Unless required by applicable law or agreed to in writing, software

 distributed under the License is distributed on an "AS IS" BASIS,

diff --git a/rustc_deps/vendor/buf_redux/README.md b/rustc_deps/vendor/buf_redux/README.md
index 2c62406..244a2b2 100644
--- a/rustc_deps/vendor/buf_redux/README.md
+++ b/rustc_deps/vendor/buf_redux/README.md
@@ -1,4 +1,8 @@
-# buf\_re(a)dux

+# buf\_re(a)dux 

+[![Travis](https://img.shields.io/travis/abonander/buf_redux.svg)](https://travis-ci.org/abonander/buf_redux)

+[![Crates.io](https://img.shields.io/crates/v/buf_redux.svg)](https://crates.io/crates/buf_redux)

+[![Crates.io](https://img.shields.io/crates/d/buf_redux.svg)](https://crates.io/crates/buf_redux)

+[![Crates.io](https://img.shields.io/crates/l/buf_redux.svg)](https://crates.io/crates/buf_redux)

 

 Drop-in replacements for buffered I/O types in `std::io`.

 

@@ -23,19 +27,21 @@
 * Flush the buffer and unwrap the inner writer unconditionally.

 

 ### More Sensible and Customizable Buffering Behavior

-* Tune the behavior of the buffer to your specific use-case using the types in the `strategy`

-module:

-    * `BufReader` performs reads as dictated by the `ReadStrategy` trait.

-    * `BufReader` moves bytes down to the beginning of the buffer, to make more room at the end, when deemed appropriate by the

-`MoveStrategy` trait.

-    * `BufWriter` flushes bytes to the inner writer when full, or when deemed appropriate by

-        the `FlushStrategy` trait.

-* `Buffer` uses exact allocation instead of leaving it up to `Vec`, which allocates sizes in powers of two.

-    * Vec's behavior is more efficient for frequent growth, but much too greedy for infrequent growth and custom capacities.

+Tune the behavior of the buffer to your specific use-case using the types in the

+`policy` module:

+

+* Refine `BufReader`'s behavior by implementing the `ReaderPolicy` trait or use

+an existing implementation like `MinBuffered` to ensure the buffer always contains

+a minimum number of bytes (until the underlying reader is empty).

+

+* Refine `BufWriter`'s behavior by implementing the `WriterPolicy` trait

+or use an existing implementation like `FlushOn` to flush when a particular byte

+appears in the buffer (used to implement `LineWriter`).

+

 

 ## Usage

 

-####[Documentation](http://docs.rs/buf_redux/)

+#### [Documentation](http://docs.rs/buf_redux/)

 

 `Cargo.toml`:

 ```toml

@@ -67,6 +73,69 @@
 + use buf_redux::LineWriter;

 ```

 

+### Using `MinBuffered`

+The new `policy::MinBuffered` reader-policy can be used to ensure that `BufReader` always has at least a

+certain number of bytes in its buffer. This can be useful for parsing applications that require a 

+certain amount of lookahead.

+

+```rust

+use buf_redux::BufReader;

+use buf_redux::policy::MinBuffered;

+use std::io::{BufRead, Cursor};

+

+let data = (1 .. 16).collect::<Vec<u8>>();

+

+// normally you should use `BufReader::new()` or give a capacity of several KiB or more

+let mut reader = BufReader::with_capacity(8, Cursor::new(data))

+    // always at least 4 bytes in the buffer (or until the source is empty)

+    .set_policy(MinBuffered(4)); // always at least 4 bytes in the buffer

+

+// first buffer fill, same as `std::io::BufReader`

+assert_eq!(reader.fill_buf().unwrap(), &[1, 2, 3, 4, 5, 6, 7, 8]);

+reader.consume(3);

+

+// enough data in the buffer, another read isn't done yet

+assert_eq!(reader.fill_buf().unwrap(), &[4, 5, 6, 7, 8]);

+reader.consume(4);

+

+// `std::io::BufReader` would return `&[8]`

+assert_eq!(reader.fill_buf().unwrap(), &[8, 9, 10, 11, 12, 13, 14, 15]);

+reader.consume(5);

+

+// no data left in the reader

+assert_eq!(reader.fill_buf().unwrap(), &[13, 14, 15]);

+```

+

+### Note: Making Room / Ringbuffers / `slice-deque` Feature

+With policies like `MinBuffered`, that will read into the buffer and consume bytes from it without completely 

+emptying it, normal buffer handling can run out of room to read/write into as all the free space is at the

+head of the buffer. If the amount of data in the buffer is small, you can call `.make_room()` on the buffered

+type to make more room for reading. `MinBuffered` will do this automatically.

+

+Instead of this, with the `slice-deque` feature, you can instead have your buffered type allocate a *ringbuffer*,

+simply by using the `::new_ringbuf()` or `::with_capacity_ringbuf()` constructors instead of 

+`::new()` or `with_capacity()`, respectively. With a ringbuffer, consuming/flushing bytes 

+from a buffer instantly makes room for more reading/writing at the end.

+However, this has some caveats:

+

+* It is only available on target platforms with virtual memory support, namely fully fledged

+OSes such as Windows and Unix-derivative platforms like Linux, OS X, BSD variants, etc.

+

+* The default capacity varies based on platform, and custom capacities are rounded up to a

+multiple of their minimum size, typically the page size of the platform.

+Windows' minimum size is comparably quite large (**64 KiB**) due to some legacy reasons,

+so this may be less optimal than the default capacity for a normal buffer (8 KiB) for some

+use-cases.

+

+* Due to the nature of the virtual-memory trick, the virtual address space the buffer

+allocates will be double its capacity. This means that your program will *appear* to use more

+memory than it would if it was using a normal buffer of the same capacity. The physical memory

+usage will be the same in both cases, but if address space is at a premium in your application

+(32-bit targets) then this may be a concern.

+

+It is up to you to decide if the benefits outweigh the costs. With a policy like `MinBuffered`,

+it could significantly improve performance.

+

 ## License

 

 Licensed under either of

diff --git a/rustc_deps/vendor/buf_redux/src/benches.rs b/rustc_deps/vendor/buf_redux/src/benches.rs
index 243939c..14b1743 100644
--- a/rustc_deps/vendor/buf_redux/src/benches.rs
+++ b/rustc_deps/vendor/buf_redux/src/benches.rs
@@ -5,9 +5,10 @@
 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

 // option. This file may not be copied, modified, or distributed

 // except according to those terms.

+extern crate test;

 

 mod construction {

-    use test;

+    use super::test;

 

     use {BufWriter, BufReader};

 

diff --git a/rustc_deps/vendor/buf_redux/src/buffer/mod.rs b/rustc_deps/vendor/buf_redux/src/buffer/mod.rs
new file mode 100644
index 0000000..2382cb5
--- /dev/null
+++ b/rustc_deps/vendor/buf_redux/src/buffer/mod.rs
@@ -0,0 +1,115 @@
+// Copyright 2018 Austin Bonander <austin.bonander@gmail.com>

+//

+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

+// option. This file may not be copied, modified, or distributed

+// except according to those terms.

+#![allow(missing_docs)]

+

+mod std_buf;

+

+#[cfg(feature = "slice-deque")]

+mod slice_deque_buf;

+

+use self::std_buf::StdBuf;

+

+#[cfg(feature = "slice-deque")]

+use self::slice_deque_buf::SliceDequeBuf;

+

+pub enum BufImpl {

+    Std(StdBuf),

+    #[cfg(feature = "slice-deque")]

+    Ringbuf(SliceDequeBuf),

+}

+

+macro_rules! forward_method {

+    (pub fn $fnname:ident(&self $($args:tt)*) [$($passargs:tt)*] $(-> $ret:ty)*) => {

+        pub fn $fnname(&self $($args)*) $(-> $ret)* {

+            match *self {

+                BufImpl::Std(ref buf) => buf.$fnname($($passargs)*),

+                #[cfg(feature = "slice-deque")]

+                BufImpl::Ringbuf(ref buf) => buf.$fnname($($passargs)*),

+            }

+        }

+    };

+

+    (pub fn $fnname:ident(&mut self $($args:tt)*) [$($passargs:tt)*] $(-> $ret:ty)*) => {

+        pub fn $fnname(&mut self $($args)*) $(-> $ret)* {

+            match *self {

+                BufImpl::Std(ref mut buf) => buf.$fnname($($passargs)*),

+                #[cfg(feature = "slice-deque")]

+                BufImpl::Ringbuf(ref mut buf) => buf.$fnname($($passargs)*),

+            }

+        }

+    };

+

+    (pub unsafe fn $fnname:ident(&self $($args:tt)*) [$($passargs:tt)*] $(-> $ret:ty)*) => {

+        pub unsafe fn $fnname(&self $($args)*) $(-> $ret)* {

+            match *self {

+                BufImpl::Std(ref buf) => buf.$fnname($($passargs)*),

+                #[cfg(feature = "slice-deque")]

+                BufImpl::Ringbuf(ref buf) => buf.$fnname($($passargs)*),

+            }

+        }

+    };

+

+    (pub unsafe fn $fnname:ident(&mut self $($args:tt)*) [$($passargs:tt)*] $(-> $ret:ty)*) => {

+        pub unsafe fn $fnname(&mut self $($args)*) $(-> $ret)* {

+            match *self {

+                BufImpl::Std(ref mut buf) => buf.$fnname($($passargs)*),

+                #[cfg(feature = "slice-deque")]

+                BufImpl::Ringbuf(ref mut buf) => buf.$fnname($($passargs)*),

+            }

+        }

+    };

+}

+

+macro_rules! forward_methods {

+    ($($($qualifiers:ident)+ ($($args:tt)*) [$($passargs:tt)*] $(-> $ret:ty)*);+;) => (

+        $(forward_method! {

+            $($qualifiers)+ ($($args)*) [$($passargs)*] $(-> $ret)*

+        })*

+    )

+}

+

+impl BufImpl {

+    pub fn with_capacity(cap: usize) -> Self {

+        BufImpl::Std(StdBuf::with_capacity(cap))

+    }

+

+    #[cfg(feature = "slice-deque")]

+    pub fn with_capacity_ringbuf(cap: usize) -> Self {

+        BufImpl::Ringbuf(SliceDequeBuf::with_capacity(cap))

+    }

+

+    pub fn is_ringbuf(&self) -> bool {

+        match *self {

+            #[cfg(feature = "slice-deque")]

+            BufImpl::Ringbuf(_) => true,

+            _ => false,

+        }

+    }

+

+    forward_methods! {

+        pub fn capacity(&self)[] -> usize;

+

+        pub fn len(&self)[] -> usize;

+

+        pub fn usable_space(&self)[] -> usize;

+

+        pub fn reserve(&mut self, additional: usize)[additional] -> bool;

+

+        pub fn make_room(&mut self)[];

+

+        pub fn buf(&self)[] -> &[u8];

+

+        pub fn buf_mut(&mut self)[] -> &mut [u8];

+

+        pub unsafe fn write_buf(&mut self)[] -> &mut [u8];

+

+        pub unsafe fn bytes_written(&mut self, add: usize)[add];

+

+        pub fn consume(&mut self, amt: usize)[amt];

+    }

+}

diff --git a/rustc_deps/vendor/buf_redux/src/buffer/slice_deque_buf.rs b/rustc_deps/vendor/buf_redux/src/buffer/slice_deque_buf.rs
new file mode 100644
index 0000000..b6d5d84
--- /dev/null
+++ b/rustc_deps/vendor/buf_redux/src/buffer/slice_deque_buf.rs
@@ -0,0 +1,83 @@
+// Copyright 2018 Austin Bonander <austin.bonander@gmail.com>

+//

+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

+// option. This file may not be copied, modified, or distributed

+// except according to those terms

+//! Move-free buffer and reader utilizing the [`slice-deque`] crate.

+//!

+//! These types are only available on target platforms with virtual memory support,

+//! namely Windows, OS X and Linux.

+//!

+//! [`slice-deque`]: https://crates.io/crates/slice-deque

+extern crate slice_deque;

+use self::slice_deque::SliceDeque;

+

+use std::cmp;

+

+pub struct SliceDequeBuf {

+    deque: SliceDeque<u8>,

+}

+

+/// Move-free buffer utilizing the [`slice-deque`] crate.

+///

+/// Its usable space will always be equal to its free space.

+///

+/// This is only available on target platforms with virtual memory support,

+/// namely Windows, OS X and Linux.

+impl SliceDequeBuf {

+    pub fn with_capacity(cap: usize) -> Self {

+        SliceDequeBuf {

+            deque: SliceDeque::with_capacity(cap),

+        }

+    }

+

+    pub fn capacity(&self) -> usize {

+        self.deque.capacity()

+    }

+

+    pub fn len(&self) -> usize { self.deque.len() }

+

+    pub fn usable_space(&self) -> usize {

+        self.capacity() - self.len()

+    }

+

+    pub fn reserve(&mut self, additional: usize) -> bool {

+        self.deque.reserve(additional);

+        true

+    }

+

+    /// This method is a no-op.

+    pub fn make_room(&mut self) {}

+

+    pub fn buf(&self) -> &[u8] { &self.deque }

+

+    pub fn buf_mut(&mut self) -> &mut [u8] { &mut self.deque }

+

+    pub unsafe fn write_buf(&mut self) -> &mut [u8] {

+        self.deque.tail_head_slice()

+    }

+

+    pub unsafe fn bytes_written(&mut self, add: usize) {

+        let offset = cmp::min(add, self.usable_space()) as isize;

+

+        if offset < 0 {

+            panic!("BufImpl.bytes_written() arg overflowed isize: {:x}", add);

+        }

+

+        self.deque.move_tail(offset);

+    }

+

+    pub fn consume(&mut self, amt: usize) {

+        unsafe {

+            let offset = cmp::min(amt, self.len()) as isize;

+

+            if offset < 0 {

+                panic!("BufImpl.consume() arg overflowed isize: {:x}", amt)

+            }

+

+            self.deque.move_head(offset);

+        }

+    }

+}

diff --git a/rustc_deps/vendor/buf_redux/src/buffer/std_buf.rs b/rustc_deps/vendor/buf_redux/src/buffer/std_buf.rs
new file mode 100644
index 0000000..983b93e
--- /dev/null
+++ b/rustc_deps/vendor/buf_redux/src/buffer/std_buf.rs
@@ -0,0 +1,235 @@
+// Copyright 2016-2018 Austin Bonander <austin.bonander@gmail.com>

+//

+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

+// option. This file may not be copied, modified, or distributed

+// except according to those terms.

+

+use safemem;

+

+use std::cmp;

+

+use self::impl_::RawBuf;

+

+pub struct StdBuf {

+    buf: RawBuf,

+    pos: usize,

+    end: usize,

+}

+

+impl StdBuf {

+    pub fn with_capacity(cap: usize) -> Self {

+        StdBuf {

+            buf: RawBuf::with_capacity(cap),

+            pos: 0,

+            end: 0,

+        }

+    }

+

+    pub fn capacity(&self) -> usize {

+        self.buf.capacity()

+    }

+

+    pub fn len(&self) -> usize {

+        self.end - self.pos

+    }

+

+    pub fn usable_space(&self) -> usize {

+        self.capacity() - self.end

+    }

+

+    pub fn reserve(&mut self, additional: usize) -> bool {

+        self.check_cursors();

+        let usable_space = self.usable_space();

+

+        // there's already enough space

+        if usable_space >= additional { return false }

+

+        // attempt to reserve additional capacity in-place

+        if self.buf.reserve_in_place(additional - usable_space) {

+            return false;

+        }

+

+        // don't copy the contents of the buffer as they're irrelevant now

+        if self.pos == self.end {

+            let capacity = self.buf.capacity();

+            // free the existing memory

+            self.buf = RawBuf::with_capacity(0);

+            self.buf = RawBuf::with_capacity(capacity + additional);

+            return true;

+        }

+

+        self.buf.reserve(additional - usable_space)

+    }

+

+    pub fn make_room(&mut self) {

+        self.check_cursors();

+

+        // no room at the head of the buffer

+        if self.pos == 0 { return; }

+

+        // simply move the bytes down to the beginning

+        let len = self.len();

+

+        safemem::copy_over(unsafe { self.buf.as_mut_slice() },

+                           self.pos, 0, len);

+

+        self.pos = 0;

+        self.end = len;

+    }

+

+    pub fn buf(&self) -> &[u8] {

+        unsafe {

+            &self.buf.as_slice()[self.pos .. self.end]

+        }

+    }

+

+    pub fn buf_mut(&mut self) -> &mut [u8] {

+        unsafe {

+            &mut self.buf.as_mut_slice()[self.pos .. self.end]

+        }

+    }

+

+    pub unsafe fn write_buf(&mut self) -> &mut [u8] {

+        &mut self.buf.as_mut_slice()[self.end ..]

+    }

+

+    pub unsafe fn bytes_written(&mut self, amt: usize) {

+        self.end = cmp::min(self.end + amt, self.capacity());

+    }

+

+    pub fn consume(&mut self, amt: usize) {

+        self.pos = cmp::min(self.pos + amt, self.end);

+        self.check_cursors();

+    }

+

+    pub fn check_cursors(&mut self) -> bool {

+        if self.pos == self.end {

+            self.pos = 0;

+            self.end = 0;

+            true

+        } else {

+            false

+        }

+    }

+}

+

+#[cfg(not(feature = "nightly"))]

+mod impl_ {

+    use std::mem;

+

+    pub struct RawBuf {

+        buf: Box<[u8]>,

+    }

+

+    impl RawBuf {

+        pub fn with_capacity(capacity: usize) -> Self {

+            let mut buf = Vec::with_capacity(capacity);

+            let true_cap = buf.capacity();

+

+            unsafe {

+                buf.set_len(true_cap);

+            }

+

+            RawBuf {

+                buf: buf.into_boxed_slice(),

+            }

+        }

+

+        pub fn capacity(&self) -> usize {

+            self.buf.len()

+        }

+

+        pub fn reserve(&mut self, additional: usize) -> bool {

+            let mut buf = mem::replace(&mut self.buf, Box::new([])).into_vec();

+

+            let old_ptr = self.buf.as_ptr();

+

+            buf.reserve_exact(additional);

+

+            unsafe {

+                let new_cap = buf.capacity();

+                buf.set_len(new_cap);

+            }

+

+            self.buf = buf.into_boxed_slice();

+

+            old_ptr == self.buf.as_ptr()

+        }

+

+        pub fn reserve_in_place(&mut self, _additional: usize) -> bool {

+            // `Vec` does not support this

+            return false;

+        }

+

+        pub unsafe fn as_slice(&self) -> &[u8] {

+            &self.buf

+        }

+

+        pub unsafe fn as_mut_slice(&mut self) -> &mut [u8] {

+            &mut self.buf

+        }

+    }

+}

+

+#[cfg(feature = "nightly")]

+mod impl_ {

+    extern crate alloc;

+

+    use self::alloc::raw_vec::RawVec;

+

+    use std::slice;

+

+    pub struct RawBuf {

+        buf: RawVec<u8>,

+    }

+

+    impl RawBuf {

+        pub fn with_capacity(capacity: usize) -> Self {

+            RawBuf {

+                buf: RawVec::with_capacity(capacity)

+            }

+        }

+

+        pub fn capacity(&self) -> usize {

+            self.buf.cap()

+        }

+

+        pub fn reserve(&mut self, additional: usize) -> bool {

+            let cap = self.capacity();

+            let old_ptr = self.buf.ptr();

+            self.buf.reserve_exact(cap, additional);

+            old_ptr != self.buf.ptr()

+        }

+

+        pub fn reserve_in_place(&mut self, additional: usize) -> bool {

+            let cap = self.capacity();

+            self.buf.reserve_in_place(cap, additional)

+        }

+

+        pub unsafe fn as_slice(&self) -> &[u8] {

+            slice::from_raw_parts(self.buf.ptr(), self.buf.cap())

+        }

+

+        pub unsafe fn as_mut_slice(&mut self) -> &mut [u8] {

+            slice::from_raw_parts_mut(self.buf.ptr(), self.buf.cap())

+        }

+

+    }

+}

+

+#[test]

+fn read_into_full() {

+    use Buffer;

+

+    let mut buffer = Buffer::with_capacity(1);

+

+    assert_eq!(buffer.capacity(), 1);

+

+    let mut bytes = &[1u8, 2][..];

+

+    // Result<usize, io::Error> does not impl PartialEq

+    assert_eq!(buffer.read_from(&mut bytes).unwrap(), 1);

+    assert_eq!(buffer.read_from(&mut bytes).unwrap(), 0);

+}

diff --git a/rustc_deps/vendor/buf_redux/src/lib.rs b/rustc_deps/vendor/buf_redux/src/lib.rs
index 7ff7a69..99c8c41 100644
--- a/rustc_deps/vendor/buf_redux/src/lib.rs
+++ b/rustc_deps/vendor/buf_redux/src/lib.rs
@@ -1,8 +1,8 @@
 // Original implementation Copyright 2013 The Rust Project Developers <https://github.com/rust-lang>

 //

-// Original source file: https://github.com/rust-lang/rust/blob/master/src/libstd/io/buffered.rs

+// Original source file: https://github.com/rust-lang/rust/blob/master/src/libstd/io/buffered.rs

 //

-// Additions copyright 2016 Austin Bonander <austin.bonander@gmail.com>

+// Additions copyright 2016-2018 Austin Bonander <austin.bonander@gmail.com>

 //

 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

@@ -51,56 +51,126 @@
 //! * Get the inner writer and trimmed buffer with the unflushed data.

 //!

 //! ### More Sensible and Customizable Buffering Behavior

-//! * Tune the behavior of the buffer to your specific use-case using the types in the [`strategy`

-//! module](strategy/index.html):

-//!     * `BufReader` performs reads as dictated by the [`ReadStrategy` trait](strategy/trait.ReadStrategy.html).

-//!     * `BufReader` moves bytes down to the beginning of the buffer, to make more room at the end, when deemed appropriate by the

-//! [`MoveStrategy` trait](strategy/trait.MoveStrategy.html).

-//!     * `BufWriter` flushes bytes to the inner writer when full, or when deemed appropriate by

-//!         the [`FlushStrategy` trait](strategy/trait.FlushStrategy.html).

-//! * `Buffer` uses exact allocation instead of leaving it up to `Vec`, which allocates sizes in powers of two.

-//!     * Vec's behavior is more efficient for frequent growth, but much too greedy for infrequent growth and custom capacities.

+//! Tune the behavior of the buffer to your specific use-case using the types in the

+//! [`policy` module]:

+//!

+//! * Refine `BufReader`'s behavior by implementing the [`ReaderPolicy` trait] or use

+//! an existing implementation like [`MinBuffered`] to ensure the buffer always contains

+//! a minimum number of bytes (until the underlying reader is empty).

+//!

+//! * Refine `BufWriter`'s behavior by implementing the [`WriterPolicy` trait]

+//! or use an existing implementation like [`FlushOn`] to flush when a particular byte

+//! appears in the buffer (used to implement [`LineWriter`]).

+//!

+//! [`policy` module]: policy

+//! [`ReaderPolicy` trait]: policy::ReaderPolicy

+//! [`MinBuffered`]: policy::MinBuffered

+//! [`WriterPolicy`]: policy::WriterPolicy

+//! [`FlushOn`]: policy::FlushOn

+//! [`LineWriter`]: LineWriter

+//!

+//! ### Making Room

+//! The buffered types of this crate and their `std::io` counterparts, by default, use `Box<[u8]>`

+//! as their buffer types ([`Buffer`](Buffer) is included as well since it is used internally

+//! by the other types in this crate).

+//!

+//! When one of these types inserts bytes into its buffer, via `BufRead::fill_buf()` (implicitly

+//! called by `Read::read()`) in `BufReader`'s case or `Write::write()` in `BufWriter`'s case,

+//! the entire buffer is provided to be read/written into and the number of bytes written is saved.

+//! The read/written data then resides in the `[0 .. bytes_inserted]` slice of the buffer.

+//!

+//! When bytes are consumed from the buffer, via `BufRead::consume()` or `Write::flush()`,

+//! the number of bytes consumed is added to the start of the slice such that the remaining

+//! data resides in the `[bytes_consumed .. bytes_inserted]` slice of the buffer.

+//!

+//! The `std::io` buffered types, and their counterparts in this crate with their default policies,

+//! don't have to deal with partially filled buffers as `BufReader` only reads when empty and

+//! `BufWriter` only flushes when full.

+//!

+//! However, because the replacements in this crate are capable of reading on-demand and flushing

+//! less than a full buffer, they can run out of room in their buffers to read/write data into even

+//! though there is technically free space, because this free space is at the head of the buffer

+//! where reading into it would cause the data in the buffer to become non-contiguous.

+//!

+//! This isn't technically a problem as the buffer could operate like `VecDeque` in `std` and return

+//! both slices at once, but this would not fit all use-cases: the `Read::fill_buf()` interface only

+//! allows one slice to be returned at a time so the older data would need to be completely consumed

+//! before the newer data can be returned; `BufWriter` could support it as the `Write` interface

+//! doesn't make an opinion on how the buffer works, but because the data would be non-contiguous

+//! it would require two flushes to get it all, which could degrade performance.

+//!

+//! The obvious solution, then, is to move the existing data down to the beginning of the buffer

+//! when there is no more room at the end so that more reads/writes into the buffer can be issued.

+//! This works, and may suit some use-cases where the amount of data left is small and thus copying

+//! it would be inexpensive, but it is non-optimal. However, this option is provided

+//! as the `.make_room()` methods, and is utilized by [`policy::MinBuffered`](policy::MinBuffered)

+//! and [`policy::FlushExact`](policy::FlushExact).

+//!

+//! ### Ringbuffers / `slice-deque` Feature

+//! Instead of moving data, however, it is also possible to use virtual-memory tricks to

+//! allocate a ringbuffer that loops around on itself in memory and thus is always contiguous,

+//! as described in [the Wikipedia article on Ringbuffers][ringbuf-wikipedia].

+//!

+//! This is the exact trick used by [the `slice-deque` crate](https://crates.io/crates/slice-deque),

+//! which is now provided as an optional feature `slice-deque` exposed via the

+//! `new_ringbuf()` and `with_capacity_ringbuf()` constructors added to the buffered types here.

+//! When a buffered type is constructed using one of these functions, `.make_room()` is turned into

+//! a no-op as consuming bytes from the head of the buffer simultaneously makes room at the tail.

+//! However, this has some caveats:

+//!

+//! * It is only available on target platforms with virtual memory support, namely fully fledged

+//! OSes such as Windows and Unix-derivative platforms like Linux, OS X, BSD variants, etc.

+//!

+//! * The default capacity varies based on platform, and custom capacities are rounded up to a

+//! multiple of their minimum size, typically the page size of the platform.

+//! Windows' minimum size is comparably quite large (**64 KiB**) due to some legacy reasons,

+//! so this may be less optimal than the default capacity for a normal buffer (8 KiB) for some

+//! use-cases.

+//!

+//! * Due to the nature of the virtual-memory trick, the virtual address space the buffer

+//! allocates will be double its capacity. This means that your program will *appear* to use more

+//! memory than it would if it was using a normal buffer of the same capacity. The physical memory

+//! usage will be the same in both cases, but if address space is at a premium in your application

+//! (32-bit targets) then this may be a concern.

+//!

+//! [ringbuf-wikipedia]: https://en.wikipedia.org/wiki/Circular_buffer#Optimization

 #![warn(missing_docs)]

-#![cfg_attr(feature = "nightly", feature(alloc, specialization))]

-#![cfg_attr(test, feature(test))]

-#![cfg_attr(all(test, feature = "nightly"), feature(io))]

+#![cfg_attr(feature = "nightly", feature(alloc, read_initializer, specialization))]

+#![cfg_attr(all(test, feature = "nightly"), feature(io, test))]

 

 extern crate memchr;

 

 extern crate safemem;

 

-#[cfg(test)]

-extern crate test;

-

 use std::any::Any;

+use std::cell::RefCell;

 use std::io::prelude::*;

 use std::io::SeekFrom;

-use std::{cmp, error, fmt, io, ops, mem};

+use std::{cmp, error, fmt, io, mem, ptr};

 

-#[cfg(test)]

+#[cfg(all(feature = "nightly", test))]

 mod benches;

 

+// std::io's tests require exact allocation which slice_deque cannot provide

 #[cfg(test)]

 mod std_tests;

 

-#[cfg(test)]

-mod tests;

+#[cfg(all(test, feature = "slice-deque"))]

+mod ringbuf_tests;

 

 #[cfg(feature = "nightly")]

 mod nightly;

 

-mod raw;

+#[cfg(feature = "nightly")]

+use nightly::init_buffer;

 

-pub mod strategy;

+mod buffer;

 

-use self::strategy::{

-    MoveStrategy, DefaultMoveStrategy,

-    ReadStrategy, DefaultReadStrategy,

-    FlushStrategy, DefaultFlushStrategy,

-    FlushOnNewline

-};

+use buffer::BufImpl;

 

-use self::raw::RawBuf;

+pub mod policy;

+

+use self::policy::{ReaderPolicy, WriterPolicy, StdPolicy, FlushOnNewline};

 

 const DEFAULT_BUF_SIZE: usize = 8 * 1024;

 

@@ -108,96 +178,126 @@
 ///

 /// Original method names/signatures and implemented traits are left untouched,

 /// making replacement as simple as swapping the import of the type.

-pub struct BufReader<R, Rs = DefaultReadStrategy, Ms = DefaultMoveStrategy>{

+///

+/// By default this type implements the behavior of its `std` counterpart: it only reads into

+/// the buffer when it is empty.

+///

+/// To change this type's behavior, change the policy with [`.set_policy()`] using a type

+/// from the [`policy` module] or your own implementation of [`ReaderPolicy`].

+///

+/// Policies that perform alternating reads and consumes without completely emptying the buffer

+/// may benefit from using a ringbuffer via the [`new_ringbuf()`] and [`with_capacity_ringbuf()`]

+/// constructors. Ringbuffers are only available on supported platforms with the

+/// `slice-deque` feature and have some other caveats; see [the crate root docs][ringbufs-root]

+/// for more details.

+///

+/// [`.set_policy()`]: BufReader::set_policy

+/// [`policy` module]: policy

+/// [`ReaderPolicy`]: policy::ReaderPolicy

+/// [`new_ringbuf()`]: BufReader::new_ringbuf

+/// [`with_capacity_ringbuf()`]: BufReader::with_capacity_ringbuf

+/// [ringbufs-root]: index.html#ringbuffers--slice-deque-feature

+pub struct BufReader<R, P = StdPolicy>{

     // First field for null pointer optimization.

     buf: Buffer,

     inner: R,

-    read_strat: Rs,

-    move_strat: Ms,

+    policy: P,

 }

 

-impl<R> BufReader<R, DefaultReadStrategy, DefaultMoveStrategy> {

-    /// Create a new `BufReader` wrapping `inner`, with a buffer of a

-    /// default capacity and the default strategies.

+impl<R> BufReader<R, StdPolicy> {

+    /// Create a new `BufReader` wrapping `inner`, utilizing a buffer of

+    /// default capacity and the default [`ReaderPolicy`](policy::ReaderPolicy).

     pub fn new(inner: R) -> Self {

-        Self::with_strategies(inner, Default::default(), Default::default())

+        Self::with_capacity(DEFAULT_BUF_SIZE, inner)

     }

 

-    /// Create a new `BufReader` wrapping `inner` with a capacity

-    /// of *at least* `cap` bytes and the default strategies.

+    /// Create a new `BufReader` wrapping `inner`, utilizing a buffer with a capacity

+    /// of *at least* `cap` bytes and the default [`ReaderPolicy`](policy::ReaderPolicy).

     ///

-    /// The actual capacity of the buffer may vary based on

-    /// implementation details of the buffer's allocator.

+    /// The actual capacity of the buffer may vary based on implementation details of the global

+    /// allocator.

     pub fn with_capacity(cap: usize, inner: R) -> Self {

-        Self::with_cap_and_strategies(inner, cap, Default::default(), Default::default())

+        Self::with_buffer(Buffer::with_capacity(cap), inner)

+    }

+

+    /// Create a new `BufReader` wrapping `inner`, utilizing a ringbuffer with the default capacity

+    /// and `ReaderPolicy`.

+    ///

+    /// A ringbuffer never has to move data to make room; consuming bytes from the head

+    /// simultaneously makes room at the tail. This is useful in conjunction with a policy like

+    /// [`MinBuffered`](policy::MinBuffered) to ensure there is always room to read more data

+    /// if necessary, without expensive copying operations.

+    ///

+    /// Only available on platforms with virtual memory support and with the `slice-deque` feature

+    /// enabled. The default capacity will differ between Windows and Unix-derivative targets.

+    /// See [`Buffer::new_ringbuf()`](struct.Buffer.html#method.new_ringbuf)

+    /// or [the crate root docs](index.html#ringbuffers--slice-deque-feature) for more info.

+    #[cfg(feature = "slice-deque")]

+    pub fn new_ringbuf(inner: R) -> Self {

+        Self::with_capacity_ringbuf(DEFAULT_BUF_SIZE, inner)

+    }

+

+    /// Create a new `BufReader` wrapping `inner`, utilizing a ringbuffer with *at least* the given

+    /// capacity and the default `ReaderPolicy`.

+    ///

+    /// A ringbuffer never has to move data to make room; consuming bytes from the head

+    /// simultaneously makes room at the tail. This is useful in conjunction with a policy like

+    /// [`MinBuffered`](policy::MinBuffered) to ensure there is always room to read more data

+    /// if necessary, without expensive copying operations.

+    ///

+    /// Only available on platforms with virtual memory support and with the `slice-deque` feature

+    /// enabled. The capacity will be rounded up to the minimum size for the target platform.

+    /// See [`Buffer::with_capacity_ringbuf()`](struct.Buffer.html#method.with_capacity_ringbuf)

+    /// or [the crate root docs](index.html#ringbuffers--slice-deque-feature) for more info.

+    #[cfg(feature = "slice-deque")]

+    pub fn with_capacity_ringbuf(cap: usize, inner: R) -> Self {

+        Self::with_buffer(Buffer::with_capacity_ringbuf(cap), inner)

+    }

+

+    /// Wrap `inner` with an existing `Buffer` instance and the default `ReaderPolicy`.

+    ///

+    /// ### Note

+    /// Does **not** clear the buffer first! If there is data already in the buffer

+    /// then it will be returned in `read()` and `fill_buf()` ahead of any data from `inner`.

+    pub fn with_buffer(buf: Buffer, inner: R) -> Self {

+        BufReader {

+            buf, inner, policy: StdPolicy

+        }

     }

 }

 

-impl<R, Rs: ReadStrategy, Ms: MoveStrategy> BufReader<R, Rs, Ms> {

-    /// Create a new `BufReader` wrapping `inner`, with a default buffer capacity

-    /// and with the given `ReadStrategy` and `MoveStrategy`.

-    pub fn with_strategies(inner: R, rs: Rs, ms: Ms) -> Self {

-        Self::with_cap_and_strategies(inner, DEFAULT_BUF_SIZE, rs, ms)

-    }

-

-    /// Create a new `BufReader` wrapping `inner`, with a buffer capacity of *at least*

-    /// `cap` bytes and the given `ReadStrategy` and `MoveStrategy`.

-    /// 

-    /// The actual capacity of the buffer may vary based on

-    /// implementation details of the buffer's allocator.

-    pub fn with_cap_and_strategies(inner: R, cap: usize, rs: Rs, ms: Ms) -> Self {

+impl<R, P> BufReader<R, P> {

+    /// Apply a new `ReaderPolicy` to this `BufReader`, returning the transformed type.

+    pub fn set_policy<P_: ReaderPolicy>(self, policy: P_) -> BufReader<R, P_> {

         BufReader {

-            inner: inner,

-            buf: Buffer::with_capacity(cap),

-            read_strat: rs,

-            move_strat: ms,

-        }

-    }

-

-    /// Apply a new `MoveStrategy` to this `BufReader`, returning the transformed type.

-    pub fn move_strategy<Ms_: MoveStrategy>(self, ms: Ms_) -> BufReader<R, Rs, Ms_> {

-        BufReader { 

             inner: self.inner,

             buf: self.buf,

-            read_strat: self.read_strat,

-            move_strat: ms,

+            policy

         }

     }

 

-    /// Apply a new `ReadStrategy` to this `BufReader`, returning the transformed type.

-    pub fn read_strategy<Rs_: ReadStrategy>(self, rs: Rs_) -> BufReader<R, Rs_, Ms> {

-        BufReader { 

-            inner: self.inner,

-            buf: self.buf,

-            read_strat: rs,

-            move_strat: self.move_strat,

-        }

+    /// Mutate the current [`ReaderPolicy`](policy::ReaderPolicy) in-place.

+    ///

+    /// If you want to change the type, use `.set_policy()`.

+    pub fn policy_mut(&mut self) -> &mut P { &mut self.policy }

+

+    /// Inspect the current `ReaderPolicy`.

+    pub fn policy(&self) -> &P {

+        &self.policy

     }

 

-    /// Accessor for updating the `MoveStrategy` in-place.

-    ///

-    /// If you want to change the type, use `.move_strategy()`.

-    pub fn move_strategy_mut(&mut self) -> &mut Ms { &mut self.move_strat }

-

-    /// Accessor for updating the `ReadStrategy` in-place.

-    ///

-    /// If you want to change the type, use `.read_strategy()`.

-    pub fn read_strategy_mut(&mut self) -> &mut Rs { &mut self.read_strat }

-

     /// Move data to the start of the buffer, making room at the end for more 

     /// reading.

+    ///

+    /// This is a no-op with the `*_ringbuf()` constructors (requires `slice-deque` feature).

     pub fn make_room(&mut self) {

-        self.buf.make_room();        

+        self.buf.make_room();

     }

 

-    /// Grow the internal buffer by *at least* `additional` bytes. May not be

+    /// Ensure room in the buffer for *at least* `additional` bytes. May not be

     /// quite exact due to implementation details of the buffer's allocator.

-    /// 

-    /// ##Note

-    /// This should not be called frequently as each call will incur a 

-    /// reallocation.

-    pub fn grow(&mut self, additional: usize) {

-        self.buf.grow(additional);

+    pub fn reserve(&mut self, additional: usize) {

+        self.buf.reserve(additional);

     }

 

     // RFC: pub fn shrink(&mut self, new_len: usize) ?

@@ -205,13 +305,13 @@
     /// Get the section of the buffer containing valid data; may be empty.

     ///

     /// Call `.consume()` to remove bytes from the beginning of this section.

-    pub fn get_buf(&self) -> &[u8] {

+    pub fn buffer(&self) -> &[u8] {

         self.buf.buf()

     }

 

     /// Get the current number of bytes available in the buffer.

-    pub fn available(&self) -> usize {

-        self.buf.buffered()

+    pub fn buf_len(&self) -> usize {

+        self.buf.len()

     }

 

     /// Get the total buffer capacity.

@@ -234,13 +334,11 @@
         self.inner

     }

 

-    /// Consume `self` and return both the underlying reader and the buffer,

-    /// with the data moved to the beginning and the length truncated to contain

-    /// only valid data.

+    /// Consume `self` and return both the underlying reader and the buffer.

     ///

     /// See also: `BufReader::unbuffer()`

-    pub fn into_inner_with_buf(self) -> (R, Vec<u8>) {

-        (self.inner, self.buf.into_inner())

+    pub fn into_inner_with_buffer(self) -> (R, Buffer) {

+        (self.inner, self.buf)

     }

 

     /// Consume `self` and return an adapter which implements `Read` and will

@@ -251,94 +349,84 @@
             buf: Some(self.buf),

         }

     }

+}

 

+impl<R, P: ReaderPolicy> BufReader<R, P> {

     #[inline]

-    fn should_read(&self) -> bool {

-        self.read_strat.should_read(&self.buf)

-    }

-

-    #[inline]

-    fn should_move(&self) -> bool {

-        self.move_strat.should_move(&self.buf)

+    fn should_read(&mut self) -> bool {

+        self.policy.before_read(&mut self.buf).0

     }

 }

 

-impl<R: Read, Rs: ReadStrategy, Ms: MoveStrategy> BufReader<R, Rs, Ms> {

-    /// Unconditionally perform a read into the buffer, calling `.make_room()`

-    /// if appropriate or necessary, as determined by the implementation.

+impl<R: Read, P> BufReader<R, P> {

+    /// Unconditionally perform a read into the buffer.

     ///

+    /// Does not invoke `ReaderPolicy` methods.

+    /// 

     /// If the read was successful, returns the number of bytes read.

-    pub fn read_into_buf(&mut self) -> io::Result<usize> { 

-        if self.should_move() {

-            self.make_room();

-        }

-        

+    pub fn read_into_buf(&mut self) -> io::Result<usize> {

         self.buf.read_from(&mut self.inner)

     }

-}

 

-impl<R: Read, Rs, Ms> BufReader<R, Rs, Ms> {

     /// Box the inner reader without losing data.

-    pub fn boxed<'a>(self) -> BufReader<Box<Read + 'a>, Rs, Ms> where R: 'a {

+    pub fn boxed<'a>(self) -> BufReader<Box<Read + 'a>, P> where R: 'a {

         let inner: Box<Read + 'a> = Box::new(self.inner);

         

         BufReader {

-            inner: inner,

+            inner,

             buf: self.buf,

-            read_strat: self.read_strat,

-            move_strat: self.move_strat,

+            policy: self.policy,

         }

     }

 }

 

-impl<R: Read, Rs: ReadStrategy, Ms: MoveStrategy> Read for BufReader<R, Rs, Ms> {

+impl<R: Read, P: ReaderPolicy> Read for BufReader<R, P> {

     fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {

-        // If we don't have any buffered data and we're doing a massive read

-        // (larger than our internal buffer), bypass our internal buffer

-        // entirely.

-        if self.buf.is_empty() && out.len() > self.buf.capacity() {

+        // If we don't have any buffered data and we're doing a read matching

+        // or exceeding the internal buffer's capacity, bypass the buffer.

+        if self.buf.is_empty() && out.len() >= self.buf.capacity() {

             return self.inner.read(out);

         }

 

-        let nread = {

-            let mut rem = try!(self.fill_buf());

-            try!(rem.read(out))

-        };

+        let nread = self.fill_buf()?.read(out)?;

         self.consume(nread);

         Ok(nread)

     }

 }

 

-impl<R: Read, Rs: ReadStrategy, Ms: MoveStrategy> BufRead for BufReader<R, Rs, Ms> {

+impl<R: Read, P: ReaderPolicy> BufRead for BufReader<R, P> {

     fn fill_buf(&mut self) -> io::Result<&[u8]> {

         // If we've reached the end of our internal buffer then we need to fetch

         // some more data from the underlying reader.

-        if self.should_read() {

-            let _ = try!(self.read_into_buf());

+        // This execution order is important; the policy may want to resize the buffer or move data

+        // before reading into it.

+        while self.should_read() && self.buf.usable_space() > 0 {

+            if self.read_into_buf()? == 0 { break; };

         }

 

-        Ok(self.get_buf())

+        Ok(self.buffer())

     }

 

-    fn consume(&mut self, amt: usize) {

+    fn consume(&mut self, mut amt: usize) {

+        amt = cmp::min(amt, self.buf_len());

         self.buf.consume(amt);

+        self.policy.after_consume(&mut self.buf, amt);

     }

 }

 

-impl<R: fmt::Debug, Rs: ReadStrategy, Ms: MoveStrategy> fmt::Debug for BufReader<R, Rs, Ms> {

+impl<R: fmt::Debug, P: fmt::Debug> fmt::Debug for BufReader<R, P> {

     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {

         fmt.debug_struct("buf_redux::BufReader")

             .field("reader", &self.inner)

-            .field("available", &self.available())

+            .field("buf_len", &self.buf_len())

             .field("capacity", &self.capacity())

-            .field("read_strategy", &self.read_strat)

-            .field("move_strategy", &self.move_strat)

+            .field("policy", &self.policy)

             .finish()

     }

 }

 

-impl<R: Seek, Rs: ReadStrategy, Ms: MoveStrategy> Seek for BufReader<R, Rs, Ms> {

-    /// Seek to an offset, in bytes, in the underlying reader.

+impl<R: Seek, P: ReaderPolicy> Seek for BufReader<R, P> {

+    /// Seek to an offset, in bytes, in the underlying reader.

     ///

     /// The position used for seeking with `SeekFrom::Current(_)` is the

     /// position the underlying reader would be at if the `BufReader` had no

@@ -359,116 +447,141 @@
     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {

         let result: u64;

         if let SeekFrom::Current(n) = pos {

-            let remainder = self.available() as i64;

+            let remainder = self.buf_len() as i64;

             // it should be safe to assume that remainder fits within an i64 as the alternative

             // means we managed to allocate 8 ebibytes and that's absurd.

             // But it's not out of the realm of possibility for some weird underlying reader to

             // support seeking by i64::min_value() so we need to handle underflow when subtracting

             // remainder.

             if let Some(offset) = n.checked_sub(remainder) {

-                result = try!(self.inner.seek(SeekFrom::Current(offset)));

+                result = self.inner.seek(SeekFrom::Current(offset))?;

             } else {

                 // seek backwards by our remainder, and then by the offset

-                try!(self.inner.seek(SeekFrom::Current(-remainder)));

+                self.inner.seek(SeekFrom::Current(-remainder))?;

                 self.buf.clear(); // empty the buffer

-                result = try!(self.inner.seek(SeekFrom::Current(n)));

+                result = self.inner.seek(SeekFrom::Current(n))?;

             }

         } else {

             // Seeking with Start/End doesn't care about our buffer length.

-            result = try!(self.inner.seek(pos));

+            result = self.inner.seek(pos)?;

         }

         self.buf.clear();

         Ok(result)

     }

 }

 

-/// A type wrapping `Option` which provides more convenient access when the `Some` case is more

-/// common.

-struct AssertSome<T>(Option<T>);

-

-impl<T> AssertSome<T> {

-    fn new(val: T) -> Self {

-        AssertSome(Some(val))

-    }

-

-    fn take(this: &mut Self) -> T {

-        this.0.take().expect("Called AssertSome::take() more than once")

-    }

-

-    fn take_self(this: &mut Self) -> Self {

-        AssertSome(this.0.take())

-    }

-

-    fn is_some(this: &Self) -> bool {

-        this.0.is_some()

-    }

-}

-

-const ASSERT_DEREF_ERR: &'static str = "Attempt to access value of AssertSome after calling AssertSome::take()";

-

-impl<T> ops::Deref for AssertSome<T> {

-    type Target = T;

-    

-    fn deref(&self) -> &T { 

-        self.0.as_ref().expect(ASSERT_DEREF_ERR)

-    }

-}

-

-impl<T> ops::DerefMut for AssertSome<T> {

-    fn deref_mut(&mut self) -> &mut T {

-        self.0.as_mut().expect(ASSERT_DEREF_ERR)

-    }

-}

-

 /// A drop-in replacement for `std::io::BufWriter` with more functionality.

-pub struct BufWriter<W: Write, Fs: FlushStrategy = DefaultFlushStrategy> {

+///

+/// Original method names/signatures and implemented traits are left untouched,

+/// making replacement as simple as swapping the import of the type.

+///

+/// By default this type implements the behavior of its `std` counterpart: it only flushes

+/// the buffer if an incoming write is larger than the remaining space.

+///

+/// To change this type's behavior, change the policy with [`.set_policy()`] using a type

+/// from the [`policy` module] or your own implementation of [`WriterPolicy`].

+///

+/// Policies that perform alternating writes and flushes without completely emptying the buffer

+/// may benefit from using a ringbuffer via the [`new_ringbuf()`] and [`with_capacity_ringbuf()`]

+/// constructors. Ringbuffers are only available on supported platforms with the

+/// `slice-deque` feature and have some caveats; see [the docs at the crate root][ringbufs-root]

+/// for more details.

+///

+/// [`.set_policy()`]: BufWriter::set_policy

+/// [`policy` module]: policy

+/// [`WriterPolicy`]: policy::WriterPolicy

+/// [`new_ringbuf()`]: BufWriter::new_ringbuf

+/// [`with_capacity_ringbuf()`]: BufWriter::with_capacity_ringbuf

+/// [ringbufs-root]: index.html#ringbuffers--slice-deque-feature

+pub struct BufWriter<W: Write, P = StdPolicy> {

     buf: Buffer,

-    inner: AssertSome<W>,

-    flush_strat: Fs,

+    inner: W,

+    policy: P,

     panicked: bool,

 }

 

-impl<W: Write> BufWriter<W, DefaultFlushStrategy> {

-    /// Wrap `inner` with the default buffer capacity and flush strategy.

+impl<W: Write> BufWriter<W> {

+    /// Create a new `BufWriter` wrapping `inner` with the default buffer capacity and

+    /// [`WriterPolicy`](policy::WriterPolicy).

     pub fn new(inner: W) -> Self {

-        Self::with_strategy(inner, Default::default())

+        Self::with_buffer(Buffer::new(), inner)

     }

 

-    /// Wrap `inner` with the given buffer capacity and the default flush strategy.

-    pub fn with_capacity(capacity: usize, inner: W) -> Self {

-        Self::with_capacity_and_strategy(capacity, inner, Default::default())

+    /// Create a new `BufWriter` wrapping `inner`, utilizing a buffer with a capacity

+    /// of *at least* `cap` bytes and the default [`WriterPolicy`](policy::WriterPolicy).

+    ///

+    /// The actual capacity of the buffer may vary based on implementation details of the global

+    /// allocator.

+    pub fn with_capacity(cap: usize, inner: W) -> Self {

+        Self::with_buffer(Buffer::with_capacity(cap), inner)

+    }

+

+    /// Create a new `BufWriter` wrapping `inner`, utilizing a ringbuffer with the default

+    /// capacity and [`WriterPolicy`](policy::WriterPolicy).

+    ///

+    /// A ringbuffer never has to move data to make room; consuming bytes from the head

+    /// simultaneously makes room at the tail. This is useful in conjunction with a policy like

+    ///  [`FlushExact`](policy::FlushExact) to ensure there is always room to write more data if

+    /// necessary, without expensive copying operations.

+    ///

+    /// Only available on platforms with virtual memory support and with the `slice-deque` feature

+    /// enabled. The default capacity will differ between Windows and Unix-derivative targets.

+    /// See [`Buffer::new_ringbuf()`](Buffer::new_ringbuf)

+    /// or [the crate root docs](index.html#ringbuffers--slice-deque-feature) for more info.

+    #[cfg(feature = "slice-deque")]

+    pub fn new_ringbuf(inner: W) -> Self {

+        Self::with_buffer(Buffer::new_ringbuf(), inner)

+    }

+

+    /// Create a new `BufWriter` wrapping `inner`, utilizing a ringbuffer with *at least* `cap`

+    /// capacity and the default [`WriterPolicy`](policy::WriterPolicy).

+    ///

+    /// A ringbuffer never has to move data to make room; consuming bytes from the head

+    /// simultaneously makes room at the tail. This is useful in conjunction with a policy like

+    /// [`FlushExact`](policy::FlushExact) to ensure there is always room to write more data if

+    /// necessary, without expensive copying operations.

+    ///

+    /// Only available on platforms with virtual memory support and with the `slice-deque` feature

+    /// enabled. The capacity will be rounded up to the minimum size for the target platform.

+    /// See [`Buffer::with_capacity_ringbuf()`](Buffer::with_capacity_ringbuf)

+    /// or [the crate root docs](index.html#ringbuffers--slice-deque-feature) for more info.

+    #[cfg(feature = "slice-deque")]

+    pub fn with_capacity_ringbuf(cap: usize, inner: W) -> Self {

+        Self::with_buffer(Buffer::with_capacity_ringbuf(cap), inner)

+    }

+

+    /// Create a new `BufWriter` wrapping `inner`, utilizing the existing [`Buffer`](Buffer)

+    /// instance and the default [`WriterPolicy`](policy::WriterPolicy).

+    ///

+    /// ### Note

+    /// Does **not** clear the buffer first! If there is data already in the buffer

+    /// it will be written out on the next flush!

+    pub fn with_buffer(buf: Buffer, inner: W) -> BufWriter<W> {

+        BufWriter {

+            buf, inner, policy: StdPolicy, panicked: false,

+        }

     }

 }

 

-impl<W: Write, Fs: FlushStrategy> BufWriter<W, Fs> {

-    /// Wrap `inner` with the default buffer capacity and given flush strategy

-    pub fn with_strategy(inner: W, flush_strat: Fs) -> Self {

-        Self::with_capacity_and_strategy(DEFAULT_BUF_SIZE, inner, flush_strat)

-    }

+impl<W: Write, P> BufWriter<W, P> {

+    /// Set a new [`WriterPolicy`](policy::WriterPolicy), returning the transformed type.

+    pub fn set_policy<P_: WriterPolicy>(self, policy: P_) -> BufWriter<W, P_> {

+        let panicked = self.panicked;

+        let (inner, buf) = self.into_inner_();

 

-    /// Wrap `inner` with the given buffer capacity and flush strategy.

-    pub fn with_capacity_and_strategy(capacity: usize, inner: W, flush_strat: Fs) -> Self {

         BufWriter {

-            inner: AssertSome::new(inner),

-            buf: Buffer::with_capacity(capacity),

-            flush_strat: flush_strat,

-            panicked: false,

+            inner, buf, policy, panicked

         }

     }

 

-    /// Set a new `FlushStrategy`, returning the transformed type.

-    pub fn flush_strategy<Fs_: FlushStrategy>(mut self, flush_strat: Fs_) -> BufWriter<W, Fs_> {

-        BufWriter {

-            inner: AssertSome::take_self(&mut self.inner),

-            buf: mem::replace(&mut self.buf, Buffer::with_capacity(0)),

-            flush_strat: flush_strat,

-            panicked: self.panicked,

-        }

+    /// Mutate the current [`WriterPolicy`](policy::WriterPolicy).

+    pub fn policy_mut(&mut self) -> &mut P {

+        &mut self.policy

     }

 

-    /// Mutate the current flush strategy.

-    pub fn flush_strategy_mut(&mut self) -> &mut Fs {

-        &mut self.flush_strat

+    /// Inspect the current `WriterPolicy`.

+    pub fn policy(&self) -> &P {

+        &self.policy

     }

 

     /// Get a reference to the inner writer.

@@ -478,7 +591,7 @@
 

     /// Get a mutable reference to the inner writer.

     ///

-    /// ###Note

+    /// ### Note

     /// If the buffer has not been flushed, writing directly to the inner type will cause

     /// data inconsistency.

     pub fn get_mut(&mut self) -> &mut W {

@@ -491,116 +604,171 @@
     }

 

     /// Get the number of bytes currently in the buffer.

-    pub fn buffered(&self) -> usize {

-        self.buf.buffered()

+    pub fn buf_len(&self) -> usize {

+        self.buf.len()

     }

 

-    /// Grow the internal buffer by *at least* `additional` bytes. May not be

+    /// Reserve space in the buffer for at least `additional` bytes. May not be

     /// quite exact due to implementation details of the buffer's allocator.

-    /// 

-    /// ##Note

-    /// This should not be called frequently as each call will incur a 

-    /// reallocation.

-    pub fn grow(&mut self, additional: usize) {

-        self.buf.grow(additional);

+    pub fn reserve(&mut self, additional: usize) {

+        self.buf.reserve(additional);

     }

 

+    /// Move data to the start of the buffer, making room at the end for more

+    /// writing.

+    ///

+    /// This is a no-op with the `*_ringbuf()` constructors (requires `slice-deque` feature).

+    pub fn make_room(&mut self) {

+        self.buf.make_room();

+    }

+

+    /// Consume `self` and return both the underlying writer and the buffer

+    pub fn into_inner_with_buffer(self) -> (W, Buffer) {

+        self.into_inner_()

+    }

+

+    // copy the fields out and forget `self` to avoid dropping twice

+    fn into_inner_(self) -> (W, Buffer) {

+        unsafe {

+            // safe because we immediately forget `self`

+            let inner = ptr::read(&self.inner);

+            let buf = ptr::read(&self.buf);

+            mem::forget(self);

+            (inner, buf)

+        }

+    }

+

+    fn flush_buf(&mut self, amt: usize) -> io::Result<()> {

+        if amt == 0 || amt > self.buf.len() { return Ok(()) }

+

+        self.panicked = true;

+        let ret = self.buf.write_max(amt, &mut self.inner);

+        self.panicked = false;

+        ret

+    }

+}

+

+impl<W: Write, P: WriterPolicy> BufWriter<W, P> {

     /// Flush the buffer and unwrap, returning the inner writer on success,

     /// or a type wrapping `self` plus the error otherwise.

     pub fn into_inner(mut self) -> Result<W, IntoInnerError<Self>> {

-        match self.flush_buf() {

+        match self.flush() {

             Err(e) => Err(IntoInnerError(self, e)),

-            Ok(()) => Ok(AssertSome::take(&mut self.inner)),

+            Ok(()) => Ok(self.into_inner_().0),

         }

     }

 

     /// Flush the buffer and unwrap, returning the inner writer and

     /// any error encountered during flushing.

     pub fn into_inner_with_err(mut self) -> (W, Option<io::Error>) {

-        let err = self.flush_buf().err();

-        (AssertSome::take(&mut self.inner), err)

-    }

-

-    /// Consume `self` and return both the underlying writer and the buffer,

-    /// with the data moved to the beginning and the length truncated to contain

-    /// only valid data.

-    pub fn into_inner_with_buf(mut self) -> (W, Vec<u8>){

-        (

-            AssertSome::take(&mut self.inner),

-            mem::replace(&mut self.buf, Buffer::with_capacity(0)).into_inner()

-        )

-    }

-

-    fn flush_buf(&mut self) -> io::Result<()> {

-        self.panicked = true;

-        let ret = self.buf.write_all(&mut *self.inner);

-        self.panicked = false;

-        ret

+        let err = self.flush().err();

+        (self.into_inner_().0, err)

     }

 }

 

-impl<W: Write, Fs: FlushStrategy> Write for BufWriter<W, Fs> {

+impl<W: Write, P: WriterPolicy> Write for BufWriter<W, P> {

     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {

-        if self.flush_strat.flush_before(&self.buf, buf.len()) {

-            try!(self.flush_buf());

-        }

+        let flush_amt = self.policy.before_write(&mut self.buf, buf.len()).0;

+        self.flush_buf(flush_amt)?;

 

-        if buf.len() >= self.buf.capacity() {

+        let written = if self.buf.is_empty() && buf.len() >= self.buf.capacity() {

             self.panicked = true;

-            let ret = self.inner.write(buf);

+            let result = self.inner.write(buf);

             self.panicked = false;

-            ret

+            result?

         } else {

-            Ok(self.buf.copy_from_slice(buf))

-        }

+            self.buf.copy_from_slice(buf)

+        };

+

+        let flush_amt = self.policy.after_write(&self.buf).0;

+

+        let _ = self.flush_buf(flush_amt);

+

+        Ok(written)

     }

 

     fn flush(&mut self) -> io::Result<()> {

-        try!(self.flush_buf());

+        let flush_amt = self.buf.len();

+        self.flush_buf(flush_amt)?;

         self.inner.flush()

     }

 }

 

-impl<W: Write + Seek, Fs: FlushStrategy> Seek for BufWriter<W, Fs> {

-    /// Seek to the offset, in bytes, in the underlying writer.

+impl<W: Write + Seek, P: WriterPolicy> Seek for BufWriter<W, P> {

+    /// Seek to the offset, in bytes, in the underlying writer.

     ///

     /// Seeking always writes out the internal buffer before seeking.

     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {

-        self.flush_buf().and_then(|_| self.get_mut().seek(pos))

+        self.flush().and_then(|_| self.get_mut().seek(pos))

     }

 }

 

-impl<W: fmt::Debug + Write, Fs: FlushStrategy> fmt::Debug for BufWriter<W, Fs> {

+impl<W: Write + fmt::Debug, P: fmt::Debug> fmt::Debug for BufWriter<W, P> {

     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

         f.debug_struct("buf_redux::BufWriter")

-            .field("writer", &*self.inner)

+            .field("writer", &self.inner)

             .field("capacity", &self.capacity())

-            .field("flush_strategy", &self.flush_strat)

+            .field("policy", &self.policy)

             .finish()

     }

 }

 

-impl<W: Write, Fs: FlushStrategy> Drop for BufWriter<W, Fs> {

+

+/// Attempt to flush the buffer to the underlying writer.

+///

+/// If an error occurs, the thread-local handler is invoked, if one was previously

+/// set by [`set_drop_err_handler`](set_drop_err_handler) for this thread.

+impl<W: Write, P> Drop for BufWriter<W, P> {

     fn drop(&mut self) {

-        if AssertSome::is_some(&self.inner) && !self.panicked {

-            // dtors should not panic, so we ignore a failed flush

-            let _r = self.flush_buf();

+        if !self.panicked {

+            // instead of ignoring a failed flush, call the handler

+            let buf_len = self.buf.len();

+            if let Err(err) = self.flush_buf(buf_len) {

+                DROP_ERR_HANDLER.with(|deh| {

+                    (*deh.borrow())(&mut self.inner, &mut self.buf, err)

+                });

+            }

         }

     }

 }

 

 /// A drop-in replacement for `std::io::LineWriter` with more functionality.

+///

+/// This is, in fact, only a thin wrapper around

+/// [`BufWriter`](BufWriter)`<W, `[`policy::FlushOnNewline`](policy::FlushOnNewline)`>`, which

+/// demonstrates the power of custom [`WriterPolicy`](policy::WriterPolicy) implementations.

 pub struct LineWriter<W: Write>(BufWriter<W, FlushOnNewline>);

 

 impl<W: Write> LineWriter<W> {

     /// Wrap `inner` with the default buffer capacity.

     pub fn new(inner: W) -> Self {

-        LineWriter(BufWriter::with_strategy(inner, FlushOnNewline))

+        Self::with_buffer(Buffer::new(), inner)

     }

 

     /// Wrap `inner` with the given buffer capacity.

-    pub fn with_capacity(capacity: usize, inner: W) -> Self {

-        LineWriter(BufWriter::with_capacity_and_strategy(capacity, inner, FlushOnNewline))

+    pub fn with_capacity(cap: usize, inner: W) -> Self {

+        Self::with_buffer(Buffer::with_capacity(cap), inner)

+    }

+

+    /// Wrap `inner` with the default buffer capacity using a ringbuffer.

+    #[cfg(feature = "slice-deque")]

+    pub fn new_ringbuf(inner: W) -> Self {

+        Self::with_buffer(Buffer::new_ringbuf(), inner)

+    }

+

+    /// Wrap `inner` with the given buffer capacity using a ringbuffer.

+    #[cfg(feature = "slice-deque")]

+    pub fn with_capacity_ringbuf(cap: usize, inner: W) -> Self {

+        Self::with_buffer(Buffer::with_capacity_ringbuf(cap), inner)

+    }

+

+    /// Wrap `inner` with an existing `Buffer` instance.

+    ///

+    /// ### Note

+    /// Does **not** clear the buffer first! If there is data already in the buffer

+    /// it will be written out on the next flush!

+    pub fn with_buffer(buf: Buffer, inner: W) -> LineWriter<W> {

+        LineWriter(BufWriter::with_buffer(buf, inner).set_policy(FlushOnNewline))

     }

 

     /// Get a reference to the inner writer.

@@ -610,31 +778,27 @@
 

     /// Get a mutable reference to the inner writer.

     ///

-    /// ###Note

+    /// ### Note

     /// If the buffer has not been flushed, writing directly to the inner type will cause

     /// data inconsistency.

     pub fn get_mut(&mut self) -> &mut W {

         self.0.get_mut()

     }

 

-    /// Get the capacty of the inner buffer.

+    /// Get the capacity of the inner buffer.

     pub fn capacity(&self) -> usize {

         self.0.capacity()

     }

 

     /// Get the number of bytes currently in the buffer.

-    pub fn buffered(&self) -> usize {

-        self.0.buffered()

+    pub fn buf_len(&self) -> usize {

+        self.0.buf_len()

     }

 

-    /// Grow the internal buffer by *at least* `additional` bytes. May not be

+    /// Ensure enough space in the buffer for *at least* `additional` bytes. May not be

     /// quite exact due to implementation details of the buffer's allocator.

-    ///

-    /// ##Note

-    /// This should not be called frequently as each call will incur a

-    /// reallocation.

-    pub fn grow(&mut self, additional: usize) {

-        self.0.grow(additional);

+    pub fn reserve(&mut self, additional: usize) {

+        self.0.reserve(additional);

     }

 

     /// Flush the buffer and unwrap, returning the inner writer on success,

@@ -650,11 +814,9 @@
         self.0.into_inner_with_err()

     }

 

-    /// Consume `self` and return both the underlying writer and the buffer,

-    /// with the data moved to the beginning and the length truncated to contain

-    /// only valid data.

-    pub fn into_inner_with_buf(self) -> (W, Vec<u8>){

-        self.0.into_inner_with_buf()

+    /// Consume `self` and return both the underlying writer and the buffer.

+    pub fn into_inner_with_buf(self) -> (W, Buffer){

+        self.0.into_inner_with_buffer()

     }

 }

 

@@ -668,7 +830,7 @@
     }

 }

 

-impl<W: fmt::Debug + Write> fmt::Debug for LineWriter<W> {

+impl<W: Write + fmt::Debug> fmt::Debug for LineWriter<W> {

     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

         f.debug_struct("buf_redux::LineWriter")

             .field("writer", self.get_ref())

@@ -720,9 +882,7 @@
 ///

 /// Supports interacting via I/O traits like `Read` and `Write`, and direct access.

 pub struct Buffer {

-    buf: RawBuf,

-    pos: usize,

-    end: usize,

+    buf: BufImpl,

     zeroed: usize,

 }

 

@@ -732,143 +892,187 @@
         Self::with_capacity(DEFAULT_BUF_SIZE)

     }

 

-    /// Create a new buffer with the given capacity.

+    /// Create a new buffer with *at least* the given capacity.

     ///

-    /// If the `Vec` ends up with extra capacity, `Buffer` will use all of it.

+    /// If the global allocator returns extra capacity, `Buffer` will use all of it.

     pub fn with_capacity(cap: usize) -> Self {

         Buffer {

-            buf: RawBuf::with_capacity(cap),

-            pos: 0,

-            end: 0,

+            buf: BufImpl::with_capacity(cap),

             zeroed: 0,

         }

     }

 

+    /// Allocate a buffer with a default capacity that never needs to move data to make room

+    /// (consuming from the head simultaneously makes more room at the tail).

+    ///

+    /// The default capacity varies based on the target platform:

+    ///

+    /// * Unix-derivative platforms; Linux, OS X, BSDs, etc: **8KiB** (the default buffer size for

+    /// `std::io` buffered types)

+    /// * Windows: **64KiB** because of legacy reasons, of course (see below)

+    ///

+    /// Only available on platforms with virtual memory support and with the `slice-deque` feature

+    /// enabled. The current platforms that are supported/tested are listed

+    /// [in the README for the `slice-deque` crate][slice-deque].

+    ///

+    /// [slice-deque]: https://github.com/gnzlbg/slice_deque#platform-support

+    #[cfg(feature = "slice-deque")]

+    pub fn new_ringbuf() -> Self {

+        Self::with_capacity_ringbuf(DEFAULT_BUF_SIZE)

+    }

+

+    /// Allocate a buffer with *at least* the given capacity that never needs to move data to

+    /// make room (consuming from the head simultaneously makes more room at the tail).

+    ///

+    /// The capacity will be rounded up to the minimum size for the current target:

+    ///

+    /// * Unix-derivative platforms; Linux, OS X, BSDs, etc: the next multiple of the page size

+    /// (typically 4KiB but can vary based on system configuration)

+    /// * Windows: the next multiple of **64KiB**; see [this Microsoft dev blog post][Win-why-64k]

+    /// for why it's 64KiB and not the page size (TL;DR: Alpha AXP needs it and it's applied on

+    /// all targets for consistency/portability)

+    ///

+    /// [Win-why-64k]: https://blogs.msdn.microsoft.com/oldnewthing/20031008-00/?p=42223

+    ///

+    /// Only available on platforms with virtual memory support and with the `slice-deque` feature

+    /// enabled. The current platforms that are supported/tested are listed

+    /// [in the README for the `slice-deque` crate][slice-deque].

+    ///

+    /// [slice-deque]: https://github.com/gnzlbg/slice_deque#platform-support

+    #[cfg(feature = "slice-deque")]

+    pub fn with_capacity_ringbuf(cap: usize) -> Self {

+        Buffer {

+            buf: BufImpl::with_capacity_ringbuf(cap),

+            zeroed: 0,

+        }

+    }

+

+    /// Return `true` if this is a ringbuffer.

+    pub fn is_ringbuf(&self) -> bool {

+        self.buf.is_ringbuf()

+    }

+

     /// Return the number of bytes currently in this buffer.

     ///

     /// Equivalent to `self.buf().len()`.

-    pub fn buffered(&self) -> usize {

-        self.end - self.pos

+    pub fn len(&self) -> usize {

+        self.buf.len()

     }

 

     /// Return the number of bytes that can be read into this buffer before it needs

     /// to grow or the data in the buffer needs to be moved.

-    pub fn headroom(&self) -> usize {

-        self.buf.len() - self.end

+    ///

+    /// This may not constitute all free space in the buffer if bytes have been consumed

+    /// from the head. Use `free_space()` to determine the total free space in the buffer.

+    pub fn usable_space(&self) -> usize {

+        self.buf.usable_space()

+    }

+

+    /// Returns the total amount of free space in the buffer, including bytes

+    /// already consumed from the head.

+    ///

+    /// This will be greater than or equal to `usable_space()`. On supported platforms

+    /// with the `slice-deque` feature enabled, it should be equal.

+    pub fn free_space(&self) -> usize {

+        self.capacity() - self.len()

     }

 

     /// Return the total capacity of this buffer.

     pub fn capacity(&self) -> usize {

-        self.buf.len()

+        self.buf.capacity()

     }

 

     /// Returns `true` if there are no bytes in the buffer, false otherwise.

     pub fn is_empty(&self) -> bool {

-        self.buffered() == 0

+        self.len() == 0

     }

 

-    /// Grow the buffer by `additional` bytes.

+    /// Move bytes down in the buffer to maximize usable space.

     ///

-    /// ###Panics

-    /// If `self.capacity() + additional` overflows.

-    pub fn grow(&mut self, additional: usize) { 

-        self.check_cursors();

+    /// This is a no-op on supported platforms with the `slice-deque` feature enabled.

+    pub fn make_room(&mut self) {

+        self.buf.make_room();

+    }

 

-        // Returns `false` if we reallocated out-of-place and thus need to re-zero.

-        if !self.buf.resize(self.end, additional) {

+    /// Ensure space for at least `additional` more bytes in the buffer.

+    ///

+    /// This is a no-op if `usable_space() >= additional`. Note that this will reallocate

+    /// even if there is enough free space at the head of the buffer for `additional` bytes,

+    /// because that free space is not at the tail where it can be read into.

+    /// If you prefer copying data down in the buffer before attempting to reallocate you may wish

+    /// to call `.make_room()` first.

+    ///

+    /// ### Panics

+    /// If `self.capacity() + additional` overflows.

+    pub fn reserve(&mut self, additional: usize) {

+        // Returns `true` if we reallocated out-of-place and thus need to re-zero.

+        if self.buf.reserve(additional) {

             self.zeroed = 0;

         }

     }

 

-    /// Reset the cursors if there is no data remaining.

-    ///

-    /// Returns true if there is no more potential headroom.

-    fn check_cursors(&mut self) -> bool {

-        if self.pos == 0 {

-            true

-        } else if self.pos == self.end {

-            self.pos = 0;

-            self.end = 0;

-            true

-        } else {

-            false

-        }

-    }

-

-    /// Make room in the buffer, moving data down to the beginning if necessary.

-    ///

-    /// Does not grow the buffer or delete unread bytes from it.

-    pub fn make_room(&mut self) {

-        if self.check_cursors() {

-            return;

-        }

-

-        let copy_amt = self.buffered();

-        // Guaranteed lowering to memmove.

-        safemem::copy_over(self.buf.get_mut(), self.pos, 0, copy_amt);

-

-        self.end -= self.pos;

-        self.pos = 0;

-    }            

-

     /// Get an immutable slice of the available bytes in this buffer.

     ///

     /// Call `.consume()` to remove bytes from the beginning of this slice.

-    pub fn buf(&self) -> &[u8] { self.buf.slice(self.pos .. self.end) }

+    pub fn buf(&self) -> &[u8] { self.buf.buf() }

 

     /// Get a mutable slice representing the available bytes in this buffer.

     ///

     /// Call `.consume()` to remove bytes from the beginning of this slice.

-    pub fn buf_mut(&mut self) -> &mut [u8] { self.buf.slice_mut(self.pos .. self.end) }

+    pub fn buf_mut(&mut self) -> &mut [u8] { self.buf.buf_mut() }

 

     /// Read from `rdr`, returning the number of bytes read or any errors.

     ///

     /// If there is no more room at the head of the buffer, this will return `Ok(0)`.

     ///

-    /// If `<R as TrustRead>::is_trusted(rdr)` returns `true`,

-    /// this method can avoid zeroing the head of the buffer.

+    /// Uses `Read::initializer()` to initialize the buffer if the `nightly`

+    /// feature is enabled, otherwise the buffer is zeroed if it has never been written.

     ///

-    /// See the `TrustRead` trait for more information.

-    ///

-    /// ###Panics

-    /// If the returned count from `rdr.read()` overflows the head cursor of this buffer.

+    /// ### Panics

+    /// If the returned count from `rdr.read()` overflows the tail cursor of this buffer.

     pub fn read_from<R: Read + ?Sized>(&mut self, rdr: &mut R) -> io::Result<usize> {

-        self.check_cursors();

-

-        if self.headroom() == 0 {

+        if self.usable_space() == 0 {

             return Ok(0);

         }

 

-        if !rdr.is_trusted() && self.zeroed < self.buf.len() {

-            let start = cmp::max(self.end, self.zeroed);

+        let cap = self.capacity();

+        if self.zeroed < cap {

+            unsafe {

+                let buf = self.buf.write_buf();

+                init_buffer(&rdr, buf);

+            }

 

-            safemem::write_bytes(self.buf.slice_mut(start..), 0);

-

-            self.zeroed = self.buf.len();

+            self.zeroed = cap;

         }

 

-        let read = try!(rdr.read(self.buf.slice_mut(self.end..)));

+        let read = {

+            let mut buf = unsafe { self.buf.write_buf() };

+            rdr.read(buf)?

+        };

 

-        let new_end = self.end.checked_add(read).expect("Overflow adding bytes read to self.end");

-

-        self.end = cmp::min(self.buf.len(), new_end);

+        unsafe {

+            self.buf.bytes_written(read);

+        }

 

         Ok(read)

     }

 

-    /// Copy from `src` to the head of this buffer. Returns the number of bytes copied.

+    /// Copy from `src` to the tail of this buffer. Returns the number of bytes copied.

     ///

-    /// This will **not** grow the buffer if `src` is larger than `self.headroom()`; instead,

-    /// it will fill the headroom and return the number of bytes copied. If there is no headroom,

-    /// this returns 0.

+    /// This will **not** grow the buffer if `src` is larger than `self.usable_space()`; instead,

+    /// it will fill the usable space and return the number of bytes copied. If there is no usable

+    /// space, this returns 0.

     pub fn copy_from_slice(&mut self, src: &[u8]) -> usize {

-        self.check_cursors();

-    

-        let len = cmp::min(self.buf.len() - self.end, src.len());

+        let len = unsafe {

+            let mut buf = self.buf.write_buf();

+            let len = cmp::min(buf.len(), src.len());

+            buf[..len].copy_from_slice(&src[..len]);

+            len

+        };

 

-        self.buf.slice_mut(self.end .. self.end + len).copy_from_slice(&src[..len]);

-

-        self.end += len;

+        unsafe {

+            self.buf.bytes_written(len);

+        }

 

         len

     }

@@ -877,33 +1081,53 @@
     ///

     /// If the buffer is empty, returns `Ok(0)`.

     ///

-    /// ###Panics

-    /// If the count returned by `wrt.write()` would overflow the tail cursor if added to it.

+    /// ### Panics

+    /// If the count returned by `wrt.write()` would cause the head cursor to overflow or pass

+    /// the tail cursor if added to it.

     pub fn write_to<W: Write + ?Sized>(&mut self, wrt: &mut W) -> io::Result<usize> {

-        self.check_cursors();

-

-        if self.buf.len() == 0 {

+        if self.len() == 0 {

             return Ok(0);

         }

 

-        let written = try!(wrt.write(self.buf()));

-

-        let new_pos = self.pos.checked_add(written)

-            .expect("Overflow adding bytes written to self.pos");

-

-        self.pos = cmp::min(new_pos, self.end);

+        let written = wrt.write(self.buf())?;

+        self.consume(written);

         Ok(written)

     }

 

-    /// Write all bytes in this buffer, ignoring interrupts. Continues writing until the buffer is

-    /// empty or an error is returned.

+    /// Write, at most, the given number of bytes from this buffer to `wrt`, continuing

+    /// to write and ignoring interrupts until the number is reached or the buffer is empty.

     ///

-    /// ###Panics

+    /// ### Panics

+    /// If the count returned by `wrt.write()` would cause the head cursor to overflow or pass

+    /// the tail cursor if added to it.

+    pub fn write_max<W: Write + ?Sized>(&mut self, mut max: usize, wrt: &mut W) -> io::Result<()> {

+        while self.len() > 0 && max > 0 {

+            let len = cmp::min(self.len(), max);

+            let n = match wrt.write(&self.buf()[..len]) {

+                Ok(0) => return Err(io::Error::new(io::ErrorKind::WriteZero,

+                                                   "Buffer::write_all() got zero-sized write")),

+                Ok(n) => n,

+                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,

+                Err(e) => return Err(e),

+            };

+

+            self.consume(n);

+            max = max.saturating_sub(n);

+        }

+

+        Ok(())

+    }

+

+    /// Write all bytes in this buffer to `wrt`, ignoring interrupts. Continues writing until

+    /// the buffer is empty or an error is returned.

+    ///

+    /// ### Panics

     /// If `self.write_to(wrt)` panics.

     pub fn write_all<W: Write + ?Sized>(&mut self, wrt: &mut W) -> io::Result<()> {

-        while self.buffered() > 0 {

+        while self.len() > 0 {

             match self.write_to(wrt) {

-                Ok(0) => return Err(io::Error::new(io::ErrorKind::WriteZero, "Buffer::write_all() got zero-sized write")),

+                Ok(0) => return Err(io::Error::new(io::ErrorKind::WriteZero,

+                                                   "Buffer::write_all() got zero-sized write")),

                 Ok(_) => (),

                 Err(ref e) if e.kind() == io::ErrorKind::Interrupted => (),

                 Err(e) => return Err(e),

@@ -915,8 +1139,6 @@
 

     /// Copy bytes to `out` from this buffer, returning the number of bytes written.

     pub fn copy_to_slice(&mut self, out: &mut [u8]) -> usize {

-        self.check_cursors();

-

         let len = {

             let buf = self.buf();

 

@@ -925,51 +1147,37 @@
             len

         };

 

-        self.pos += len;

+        self.consume(len);

 

         len

     }

 

     /// Push `bytes` to the end of the buffer, growing it if necessary.

+    ///

+    /// If you prefer moving bytes down in the buffer to reallocating, you may wish to call

+    /// `.make_room()` first.

     pub fn push_bytes(&mut self, bytes: &[u8]) {

-        self.check_cursors();

-

         let s_len = bytes.len();

 

-        if self.headroom() < s_len {

-            self.grow(s_len * 2);

+        if self.usable_space() < s_len {

+            self.reserve(s_len * 2);

         }

 

-        self.buf.slice_mut(s_len..).copy_from_slice(bytes);

-        self.end += s_len;

+        unsafe {

+            self.buf.write_buf()[..s_len].copy_from_slice(bytes);

+            self.buf.bytes_written(s_len);

+        }

     }

 

-    /// Consume `amt` bytes from the tail of this buffer. No more than `self.available()` bytes

-    /// will be consumed.

+    /// Consume `amt` bytes from the head of this buffer.

     pub fn consume(&mut self, amt: usize) {

-        let avail = self.buffered();

-

-        if amt >= avail {

-            self.clear();

-        } else {

-            self.pos += amt;

-        }

+        self.buf.consume(amt);

     }

 

-    /// Empty this buffer by resetting the cursors.

+    /// Empty this buffer by consuming all bytes.

     pub fn clear(&mut self) {

-        self.pos = 0;

-        self.end = 0;

-    }

-

-    /// Move the bytes down the beginning of the buffer and take the inner vector, truncated

-    /// to the number of bytes available.

-    pub fn into_inner(mut self) -> Vec<u8> {

-        self.make_room();

-        let avail = self.buffered();

-        let mut buf = self.buf.into_vec();

-        buf.truncate(avail);

-        buf

+        let buf_len = self.len();

+        self.consume(buf_len);

     }

 }

 

@@ -977,13 +1185,13 @@
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

         f.debug_struct("buf_redux::Buffer")

             .field("capacity", &self.capacity())

-            .field("available", &self.buffered())

+            .field("len", &self.len())

             .finish()

     }

 }

 

-/// A `Read` adapter for a consumed `BufReader` which will empty bytes from the buffer before reading from

-/// `inner` directly. Frees the buffer when it has been emptied. 

+/// A `Read` adapter for a consumed `BufReader` which will empty bytes from the buffer before

+/// reading from `R` directly. Frees the buffer when it has been emptied.

 pub struct Unbuffer<R> {

     inner: R,

     buf: Option<Buffer>,

@@ -997,7 +1205,7 @@
 

     /// Returns the number of bytes remaining in the buffer.

     pub fn buf_len(&self) -> usize {

-        self.buf.as_ref().map(Buffer::buffered).unwrap_or(0)

+        self.buf.as_ref().map(Buffer::len).unwrap_or(0)

     }

 

     /// Get a slice over the available bytes in the buffer.

@@ -1038,7 +1246,8 @@
 

 /// Copy data between a `BufRead` and a `Write` without an intermediate buffer.

 ///

-/// Retries on interrupts.

+/// Retries on interrupts. Returns the total bytes copied or the first error;

+/// even if an error is returned some bytes may still have been copied.

 pub fn copy_buf<B: BufRead, W: Write>(b: &mut B, w: &mut W) -> io::Result<u64> {

     let mut total_copied = 0;

 

@@ -1059,42 +1268,27 @@
     Ok(total_copied)

 }

 

-/// A trait which `Buffer` can use to determine whether or not

-/// it is safe to elide zeroing of its buffer.

+thread_local!(

+    static DROP_ERR_HANDLER: RefCell<Box<Fn(&mut Write, &mut Buffer, io::Error)>>

+        = RefCell::new(Box::new(|_, _, _| ()))

+);

+

+/// Set a thread-local handler for errors thrown in `BufWriter`'s `Drop` impl.

 ///

-/// Has a default implementation of `is_trusted()` which always returns `false`.

+/// The `Write` impl, buffer (at the time of the erroring write) and IO error are provided.

 ///

-/// Use the `nightly` feature to enable specialization, which means this

-/// trait can be implemented for specifically trusted types from the stdlib

-/// and potentially elsewhere.

+/// Replaces the previous handler. By default this is a no-op.

 ///

-/// ###Motivation

-/// As part of its intended operation, `Buffer` can pass a potentially

-/// uninitialized slice of its buffer to `Read::read()`. Untrusted readers could access sensitive

-/// information in this slice, from previous usage of that region of memory,

-/// which has not been overwritten yet. Thus, the uninitialized parts of the buffer need to be zeroed

-/// to prevent unintentional leakage of information.

-///

-/// However, for trusted readers which are known to only write to this slice and not read from it,

-/// such as various types in the stdlib which will pass the slice directly to a syscall,

-/// this zeroing is an unnecessary waste of cycles which the optimizer may or may not elide properly.

-///

-/// This trait helps `Buffer` determine whether or not a particular reader is trustworthy.

-pub unsafe trait TrustRead: Read {

-    /// Return `true` if this reader does not need a zeroed slice passed to `.read()`.

-    fn is_trusted(&self) -> bool;

+/// ### Panics

+/// If called from within a handler previously provided to this function.

+pub fn set_drop_err_handler<F: 'static>(handler: F)

+where F: Fn(&mut Write, &mut Buffer, io::Error)

+{

+    DROP_ERR_HANDLER.with(|deh| *deh.borrow_mut() = Box::new(handler))

 }

 

 #[cfg(not(feature = "nightly"))]

-unsafe impl<R: Read> TrustRead for R {

-    /// Default impl which always returns `false`.

-    ///

-    /// Enable the `nightly` feature to specialize this impl for various types.

-    fn is_trusted(&self) -> bool {

-        false

-    }

+fn init_buffer<R: Read + ?Sized>(_r: &R, buf: &mut [u8]) {

+    // we can't trust a reader without nightly

+    safemem::write_bytes(buf, 0);

 }

-

-#[cfg(feature = "nightly")]

-pub use nightly::AssertTrustRead;

-

diff --git a/rustc_deps/vendor/buf_redux/src/nightly.rs b/rustc_deps/vendor/buf_redux/src/nightly.rs
index 442b76a..a81fae5 100644
--- a/rustc_deps/vendor/buf_redux/src/nightly.rs
+++ b/rustc_deps/vendor/buf_redux/src/nightly.rs
@@ -1,127 +1,25 @@
-// Copyright 2016 Austin Bonander <austin.bonander@gmail.com>

+// Copyright 2016-2018 Austin Bonander <austin.bonander@gmail.com>

 //

 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

 // option. This file may not be copied, modified, or distributed

 // except according to those terms.

-use std::fmt;

-use std::io::{self, Read, Write};

 

-use super::TrustRead;

+//! Anything requiring unstable features (specialization, `Read::initializer()`, etc)

+

+use std::fmt;

+use std::io::{Read, Write};

 

 use super::{BufReader, BufWriter, LineWriter};

 

-use strategy::{FlushStrategy, MoveStrategy, ReadStrategy};

+use policy::{WriterPolicy, MoveStrategy, ReaderPolicy};

 

-// ===== TrustRead impls =====

-unsafe impl<R: Read> TrustRead for R {

-    /// Default impl which always returns `false`.

-    default fn is_trusted(&self) -> bool {

-        false

-    }

-}

-

-macro_rules! trust {

-    ($($ty:path),+) => (

-        $(unsafe impl $crate::TrustRead for $ty {

-            /// Unconditional impl that returns `true`.

-            fn is_trusted(&self) -> bool { true }

-        })+

-    )

-}

-

-trust! {

-    ::std::io::Stdin, ::std::fs::File, ::std::net::TcpStream,

-    ::std::io::Empty, ::std::io::Repeat

-}

-

-unsafe impl<'a> TrustRead for &'a [u8] {

-    /// Unconditional impl that returns `true`.

-    fn is_trusted(&self) -> bool { true }

-}

-

-unsafe impl<'a> TrustRead for ::std::io::StdinLock<'a> {

-    /// Unconditional impl that returns `true`.

-    fn is_trusted(&self) -> bool { true }

-}

-

-unsafe impl<T: AsRef<[u8]>> TrustRead for ::std::io::Cursor<T> {

-    /// Unconditional impl that returns `true`.

-    fn is_trusted(&self) -> bool { true }

-}

-

-unsafe impl<R: Read> TrustRead for ::std::io::BufReader<R> {

-    /// Returns `self.get_ref().is_trusted()`

-    fn is_trusted(&self) -> bool { self.get_ref().is_trusted() }

-}

-

-unsafe impl<R: Read, Rs: ReadStrategy, Ms: MoveStrategy> TrustRead for ::BufReader<R, Rs, Ms> {

-    /// Returns `self.get_ref().is_trusted()`

-    fn is_trusted(&self) -> bool { self.get_ref().is_trusted() }

-}

-

-/// A wrapper for a `Read` type that will unconditionally return `true` for `self.is_trusted()`.

-///

-/// See the `TrustRead` trait for more information.

-pub struct AssertTrustRead<R>(R);

-

-impl<R> AssertTrustRead<R> {

-    /// Create a new `AssertTrustRead` wrapping `inner`.

-    ///

-    /// ###Safety

-    /// Because this wrapper will return `true` for `self.is_trusted()`,

-    /// the inner reader may be passed uninitialized memory containing potentially

-    /// sensitive information from previous usage.

-    ///

-    /// Wrapping a reader with this type asserts that the reader will not attempt to access

-    /// the memory passed to `Read::read()`.

-    pub unsafe fn new(inner: R) -> Self {

-        AssertTrustRead(inner)

-    }

-

-    /// Get a reference to the inner reader.

-    pub fn get_ref(&self) -> &R { &self.0 }

-

-    /// Get a mutable reference to the inner reader.

-    ///

-    /// Unlike `BufReader` (from this crate or the stdlib), calling `.read()` through this

-    /// reference cannot cause logical inconsistencies because this wrapper does not take any

-    /// data from the underlying reader.

-    ///

-    /// However, it is best if you use the I/O methods on this wrapper itself, especially with

-    /// `BufReader` or `Buffer` as it allows them to elide zeroing of their buffers.

-    pub fn get_mut(&mut self) -> &mut R { &mut self.0 }

-

-    /// Take the wrapped reader by-value.

-    pub fn into_inner(self) -> R { self.0 }

-}

-

-impl<R> AsRef<R> for AssertTrustRead<R> {

-    fn as_ref(&self) -> &R { self.get_ref() }

-}

-

-impl<R> AsMut<R> for AssertTrustRead<R> {

-    fn as_mut(&mut self) -> &mut R { self.get_mut() }

-}

-

-impl<R: Read> Read for AssertTrustRead<R> {

-    /// Unconditionally calls through to `<R as Read>::read()`.

-    fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {

-        self.0.read(out)

-    }

-}

-

-unsafe impl<R: Read> TrustRead for AssertTrustRead<R> {

-    /// Unconditional impl that returns `true`.

-    fn is_trusted(&self) -> bool { true }

-}

-

-impl<R, Rs: ReadStrategy, Ms: MoveStrategy> fmt::Debug for BufReader<R, Rs, Ms> {

+impl<R, Rs: ReaderPolicy> fmt::Debug for BufReader<R, Rs> {

     default fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

         f.debug_struct("buf_redux::BufReader")

             .field("reader", &"(no Debug impl)")

-            .field("available", &self.available())

+            .field("available", &self.buf_len())

             .field("capacity", &self.capacity())

             .field("read_strategy", &self.read_strat)

             .field("move_strategy", &self.move_strat)

@@ -129,12 +27,12 @@
     }

 }

 

-impl<W: Write, Fs: FlushStrategy> fmt::Debug for BufWriter<W, Fs> {

+impl<W: Write, Fs: WriterPolicy> fmt::Debug for BufWriter<W, Fs> {

     default fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

         f.debug_struct("buf_redux::BufWriter")

             .field("writer", &"(no Debug impl)")

             .field("capacity", &self.capacity())

-            .field("flush_strategy", &self.flush_strat)

+            .field("flush_strategy", &self.policy)

             .finish()

     }

 }

@@ -146,4 +44,10 @@
             .field("capacity", &self.capacity())

             .finish()

     }

-}
\ No newline at end of file
+}

+

+pub fn init_buffer<R: Read + ?Sized>(rdr: &R, buf: &mut [u8]) {

+    // no invariants for consumers to uphold:

+    // https://doc.rust-lang.org/nightly/std/io/trait.Read.html#method.initializer

+    unsafe { rdr.initializer().initialize(buf) }

+}

diff --git a/rustc_deps/vendor/buf_redux/src/policy.rs b/rustc_deps/vendor/buf_redux/src/policy.rs
new file mode 100644
index 0000000..a4f282f
--- /dev/null
+++ b/rustc_deps/vendor/buf_redux/src/policy.rs
@@ -0,0 +1,375 @@
+// Copyright 2016-2018 Austin Bonander <austin.bonander@gmail.com>

+//

+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

+// option. This file may not be copied, modified, or distributed

+// except according to those terms.

+//! Types which can be used to tune the behavior of `BufReader` and `BufWriter`.

+//!

+//! Some simple policies are provided for your convenience. You may prefer to create your own

+//! types and implement the traits for them instead.

+

+use super::Buffer;

+

+/// Flag for `ReaderPolicy` methods to signal whether or not `BufReader` should read into

+/// the buffer.

+///

+/// See `do_read!()` for a shorthand.

+#[derive(Copy, Clone, Debug)]

+pub struct DoRead(pub bool);

+

+/// Shorthand for `return DoRead(bool)` or `return DoRead(true)` (empty invocation)

+#[macro_export]

+macro_rules! do_read (

+    ($val:expr) => ( return $crate::policy::DoRead($val); );

+    () => ( do_read!(true); )

+);

+

+/// Default policy for both `BufReader` and `BufWriter` that reproduces the behaviors of their

+/// `std::io` counterparts:

+///

+/// * `BufReader`: only reads when the buffer is empty, does not resize or move data.

+/// * `BufWriter`: only flushes the buffer when there is not enough room for an incoming write.

+#[derive(Debug, Default)]

+pub struct StdPolicy;

+

+/// Trait that governs `BufReader`'s behavior.

+pub trait ReaderPolicy {

+    /// Consulted before attempting to read into the buffer.

+    ///

+    /// Return `DoRead(true)` to issue a read into the buffer before reading data out of it,

+    /// or `DoRead(false)` to read from the buffer as it is, even if it's empty.

+    /// `do_read!()` is provided as a shorthand.

+    ///

+    /// If there is no room in the buffer after this method is called,

+    /// the buffer will not be read into (so if the buffer is full but you want more data

+    /// you should call `.make_room()` or reserve more space). If there *is* room, `BufReader` will

+    /// attempt to read into the buffer. If successful (`Ok(x)` where `x > 0` is returned), this

+    /// method will be consulted again for another read attempt.

+    ///

+    /// By default, this implements `std::io::BufReader`'s behavior: only read into the buffer if

+    /// it is empty.

+    ///

+    /// ### Note

+    /// If the read will ignore the buffer entirely (if the buffer is empty and the amount to be

+    /// read matches or exceeds its capacity) or if `BufReader::read_into_buf()` was called to force

+    /// a read into the buffer manually, this method will not be called.

+    fn before_read(&mut self, buffer: &mut Buffer) -> DoRead { DoRead(buffer.len() == 0) }

+

+    /// Called after bytes are consumed from the buffer.

+    ///

+    /// Supplies the true amount consumed if the amount passed to `BufReader::consume`

+    /// was in excess.

+    ///

+    /// This is a no-op by default.

+    fn after_consume(&mut self, _buffer: &mut Buffer, _amt: usize) {}

+}

+

+/// Behavior of `std::io::BufReader`: the buffer will only be read into if it is empty.

+impl ReaderPolicy for StdPolicy {}

+

+/// A policy for [`BufReader`](::BufReader) which ensures there is at least the given number of

+/// bytes in the buffer, failing this only if the reader is at EOF.

+///

+/// If the minimum buffer length is greater than the buffer capacity, it will be resized.

+///

+/// ### Example

+/// ```rust

+/// use buf_redux::BufReader;

+/// use buf_redux::policy::MinBuffered;

+/// use std::io::{BufRead, Cursor};

+/// 

+/// let data = (1 .. 16).collect::<Vec<u8>>();

+///

+/// // normally you should use `BufReader::new()` or give a capacity of several KiB or more

+/// let mut reader = BufReader::with_capacity(8, Cursor::new(data))

+///     // always at least 4 bytes in the buffer (or until the source is empty)

+///     .set_policy(MinBuffered(4)); // always at least 4 bytes in the buffer

+///

+/// // first buffer fill, same as `std::io::BufReader`

+/// assert_eq!(reader.fill_buf().unwrap(), &[1, 2, 3, 4, 5, 6, 7, 8]);

+/// reader.consume(3);

+///

+/// // enough data in the buffer, another read isn't done yet

+/// assert_eq!(reader.fill_buf().unwrap(), &[4, 5, 6, 7, 8]);

+/// reader.consume(4);

+///

+/// // `std::io::BufReader` would return `&[8]`

+/// assert_eq!(reader.fill_buf().unwrap(), &[8, 9, 10, 11, 12, 13, 14, 15]);

+/// reader.consume(5);

+///

+/// // no data left in the reader

+/// assert_eq!(reader.fill_buf().unwrap(), &[13, 14, 15]);

+/// ```

+#[derive(Debug)]

+pub struct MinBuffered(pub usize);

+

+impl MinBuffered {

+    /// Set the number of bytes to ensure are in the buffer.

+    pub fn set_min(&mut self, min: usize) {

+        self.0 = min;

+    }

+}

+

+impl ReaderPolicy for MinBuffered {

+    fn before_read(&mut self, buffer: &mut Buffer) -> DoRead {

+        // do nothing if we have enough data

+        if buffer.len() >= self.0 { do_read!(false) }

+

+        let cap = buffer.capacity();

+

+        // if there's enough room but some of it's stuck after the head

+        if buffer.usable_space() < self.0 && buffer.free_space() >= self.0 {

+            buffer.make_room();

+        } else if cap < self.0 {

+            buffer.reserve(self.0 - cap);

+        }

+

+        DoRead(true)

+    }

+}

+

+/// Flag for `WriterPolicy` methods to tell `BufWriter` how many bytes to flush to the

+/// underlying writer.

+///

+/// See `flush_amt!()` for a shorthand.

+#[derive(Copy, Clone, Debug)]

+pub struct FlushAmt(pub usize);

+

+/// Shorthand for `return FlushAmt(n)` or `return FlushAmt(0)` (empty invocation)

+#[macro_export]

+macro_rules! flush_amt (

+    ($n:expr) => ( return $crate::policy::FlushAmt($n); );

+    () => ( flush_amt!(0); )

+);

+

+/// A trait which tells `BufWriter` when to flush.

+pub trait WriterPolicy {

+    /// Return `FlushAmt(n > 0)` if the buffer should be flushed before reading into it.

+    /// If the returned amount is 0 or greater than the amount of buffered data, no flush is

+    /// performed.

+    ///

+    /// The buffer is provided, as well as `incoming` which is

+    /// the size of the buffer that will be written to the `BufWriter`.

+    ///

+    /// By default, flushes the buffer if the usable space is smaller than the incoming write.

+    fn before_write(&mut self, buf: &mut Buffer, incoming: usize) -> FlushAmt {

+        FlushAmt(if incoming > buf.usable_space() { buf.len() } else { 0 })

+    }

+

+    /// Return `FlushAmt(n > 0)` if the buffer should be flushed after reading into it.

+    ///

+    /// `buf` references the updated buffer after the read.

+    ///

+    /// Default impl is a no-op.

+    fn after_write(&mut self, _buf: &Buffer) -> FlushAmt {

+        FlushAmt(0)

+    }

+}

+

+/// Default behavior of `std::io::BufWriter`: flush before a read into the buffer

+/// only if the incoming data is larger than the buffer's writable space.

+impl WriterPolicy for StdPolicy {}

+

+/// Flush the buffer if it contains at least the given number of bytes.

+#[derive(Debug, Default)]

+pub struct FlushAtLeast(pub usize);

+

+impl WriterPolicy for FlushAtLeast {

+    fn before_write(&mut self, buf: &mut Buffer, incoming: usize) -> FlushAmt {

+        ensure_capacity(buf, self.0);

+        FlushAmt(if incoming > buf.usable_space() { buf.len() } else { 0 })

+    }

+

+    fn after_write(&mut self, buf: &Buffer) -> FlushAmt {

+        FlushAmt(::std::cmp::max(buf.len(), self.0))

+    }

+}

+

+/// Only ever flush exactly the given number of bytes, until the writer is empty.

+#[derive(Debug, Default)]

+pub struct FlushExact(pub usize);

+

+impl WriterPolicy for FlushExact {

+    /// Flushes the buffer if there is not enough room to fit `incoming` bytes,

+    /// but only when the buffer contains at least `self.0` bytes.

+    ///

+    /// Otherwise, calls [`Buffer::make_room()`](::Buffer::make_room)

+    fn before_write(&mut self, buf: &mut Buffer, incoming: usize) -> FlushAmt {

+        ensure_capacity(buf, self.0);

+

+        // don't have enough room to fit the additional bytes but we can't flush,

+        // then make room for (at least some of) the incoming bytes.

+        if incoming > buf.usable_space() && buf.len() < self.0 {

+            buf.make_room();

+        }

+

+        FlushAmt(self.0)

+    }

+

+    /// Flushes the given amount if possible, nothing otherwise.

+    fn after_write(&mut self, _buf: &Buffer) -> FlushAmt {

+        FlushAmt(self.0)

+    }

+}

+

+/// Flush the buffer if it contains the given byte.

+///

+/// Only scans the buffer after reading. Searches from the end first.

+#[derive(Debug, Default)]

+pub struct FlushOn(pub u8);

+

+impl WriterPolicy for FlushOn {

+    fn after_write(&mut self, buf: &Buffer) -> FlushAmt {

+        // include the delimiter in the flush

+        FlushAmt(::memchr::memrchr(self.0, buf.buf()).map_or(0, |n| n + 1))

+    }

+}

+

+/// Flush the buffer if it contains a newline (`\n`).

+///

+/// Equivalent to `FlushOn(b'\n')`.

+#[derive(Debug, Default)]

+pub struct FlushOnNewline;

+

+impl WriterPolicy for FlushOnNewline {

+    fn after_write(&mut self, buf: &Buffer) -> FlushAmt {

+        FlushAmt(::memchr::memrchr(b'\n', buf.buf()).map_or(0, |n| n + 1))

+    }

+}

+

+fn ensure_capacity(buf: &mut Buffer, min_cap: usize) {

+    let cap = buf.capacity();

+

+    if cap < min_cap {

+        buf.reserve(min_cap - cap);

+    }

+}

+

+#[cfg(test)]

+mod test {

+    use {BufReader, BufWriter};

+    use policy::*;

+    use std::io::{BufRead, Cursor, Write};

+

+    #[test]

+    fn test_min_buffered() {

+        let min_buffered = 4;

+        let data = (0 .. 20).collect::<Vec<u8>>();

+        // create a reader with 0 capacity

+        let mut reader = BufReader::with_capacity(0, Cursor::new(data))

+            .set_policy(MinBuffered(min_buffered));

+

+        // policy reserves the required space in the buffer

+        assert_eq!(reader.fill_buf().unwrap(), &[0, 1, 2, 3][..]);

+        assert_eq!(reader.capacity(), min_buffered);

+

+        // double the size now that the buffer's full

+        reader.reserve(min_buffered);

+        assert_eq!(reader.capacity(), min_buffered * 2);

+

+        // we haven't consumed anything, the reader should have the same data

+        assert_eq!(reader.fill_buf().unwrap(), &[0, 1, 2, 3]);

+        reader.consume(2);

+        // policy read more data, `std::io::BufReader` doesn't do that

+        assert_eq!(reader.fill_buf().unwrap(), &[2, 3, 4, 5, 6, 7]);

+        reader.consume(4);

+        // policy made room and read more

+        assert_eq!(reader.fill_buf().unwrap(), &[6, 7, 8, 9, 10, 11, 12, 13]);

+        reader.consume(4);

+        assert_eq!(reader.fill_buf().unwrap(), &[10, 11, 12, 13]);

+        reader.consume(2);

+        assert_eq!(reader.fill_buf().unwrap(), &[12, 13, 14, 15, 16, 17, 18, 19]);

+        reader.consume(8);

+        assert_eq!(reader.fill_buf().unwrap(), &[])

+    }

+

+    #[test]

+    fn test_flush_at_least() {

+        let flush_min = 4;

+

+        let mut writer = BufWriter::with_capacity(0, vec![]).set_policy(FlushAtLeast(flush_min));

+        assert_eq!(writer.capacity(), 0);

+        assert_eq!(writer.write(&[1]).unwrap(), 1);

+        // policy reserved space for writing

+        assert_eq!(writer.capacity(), flush_min);

+        // one byte in buffer, we want to double our capacity

+        writer.reserve(flush_min * 2 - 1);

+        assert_eq!(writer.capacity(), flush_min * 2);

+

+        assert_eq!(writer.write(&[2, 3]).unwrap(), 2);

+        // no flush yet, only 3 bytes in buffer

+        assert_eq!(*writer.get_ref(), &[]);

+

+        assert_eq!(writer.write(&[4, 5, 6]).unwrap(), 3);

+        // flushed all

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, 4, 5, 6]);

+

+        assert_eq!(writer.write(&[7, 8, 9]).unwrap(), 3);

+        // `.into_inner()` should flush always

+        assert_eq!(writer.into_inner().unwrap(), &[1, 2, 3, 4, 5, 6, 7, 8, 9]);

+    }

+

+    #[test]

+    fn test_flush_exact() {

+        let flush_exact = 4;

+

+        let mut writer = BufWriter::with_capacity(0, vec![]).set_policy(FlushExact(flush_exact));

+        assert_eq!(writer.capacity(), 0);

+        assert_eq!(writer.write(&[1]).unwrap(), 1);

+        // policy reserved space for writing

+        assert_eq!(writer.capacity(), flush_exact);

+        // one byte in buffer, we want to double our capacity

+        writer.reserve(flush_exact * 2 - 1);

+        assert_eq!(writer.capacity(), flush_exact * 2);

+

+        assert_eq!(writer.write(&[2, 3]).unwrap(), 2);

+        // no flush yet, only 3 bytes in buffer

+        assert_eq!(*writer.get_ref(), &[]);

+

+        assert_eq!(writer.write(&[4, 5, 6]).unwrap(), 3);

+        // flushed exactly 4 bytes

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, 4]);

+

+        assert_eq!(writer.write(&[7, 8, 9, 10]).unwrap(), 4);

+        // flushed another 4 bytes

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, 4, 5, 6, 7, 8]);

+        // `.into_inner()` should flush always

+        assert_eq!(writer.into_inner().unwrap(), &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);

+    }

+

+    #[test]

+    fn test_flush_on() {

+        let mut writer = BufWriter::with_capacity(8, vec![]).set_policy(FlushOn(0));

+

+        assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);

+        assert_eq!(*writer.get_ref(), &[]);

+

+        assert_eq!(writer.write(&[0, 4, 5]).unwrap(), 3);

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, 0]);

+

+        assert_eq!(writer.write(&[6, 7, 8, 9, 10, 11, 12]).unwrap(), 7);

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, 0, 4, 5]);

+

+        assert_eq!(writer.write(&[0]).unwrap(), 1);

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, 0, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0]);

+    }

+

+    #[test]

+    fn test_flush_on_newline() {

+        let mut writer = BufWriter::with_capacity(8, vec![]).set_policy(FlushOnNewline);

+

+        assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);

+        assert_eq!(*writer.get_ref(), &[]);

+

+        assert_eq!(writer.write(&[b'\n', 4, 5]).unwrap(), 3);

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, b'\n']);

+

+        assert_eq!(writer.write(&[6, 7, 8, 9, b'\n', 11, 12]).unwrap(), 7);

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, b'\n', 4, 5, 6, 7, 8, 9, b'\n']);

+

+        assert_eq!(writer.write(&[b'\n']).unwrap(), 1);

+        assert_eq!(*writer.get_ref(), &[1, 2, 3, b'\n', 4, 5, 6, 7, 8, 9, b'\n', 11, 12, b'\n']);

+    }

+}

diff --git a/rustc_deps/vendor/buf_redux/src/raw.rs b/rustc_deps/vendor/buf_redux/src/raw.rs
deleted file mode 100644
index 520ffcc..0000000
--- a/rustc_deps/vendor/buf_redux/src/raw.rs
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2016 Austin Bonander <austin.bonander@gmail.com>

-//

-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

-// option. This file may not be copied, modified, or distributed

-// except according to those terms.

-

-pub use self::impl_::RawBuf;

-

-#[cfg(not(feature = "nightly"))]

-mod impl_ {

-    use std::ops::{Index, IndexMut};

-

-    pub struct RawBuf {

-        buf: Vec<u8>,

-    }

-

-    impl RawBuf {

-        pub fn with_capacity(capacity: usize) -> Self {

-            let mut buf = Vec::with_capacity(capacity);

-            let true_cap = buf.capacity();

-

-            unsafe {

-                buf.set_len(true_cap);

-            }

-

-            RawBuf {

-                buf: buf

-            }

-        }

-

-        pub fn len(&self) -> usize {

-            self.buf.capacity()

-        }

-

-        pub fn get_mut(&mut self) -> &mut [u8] {

-            &mut self.buf

-        }

-

-        pub fn slice<R>(&self, range: R) -> &<[u8] as Index<R>>::Output

-        where [u8]: Index<R> {

-            &(*self.buf)[range]

-        }

-

-        pub fn slice_mut<R>(&mut self, range: R) -> &mut <[u8] as Index<R>>::Output

-        where [u8]: IndexMut<R> {

-            &mut (*self.buf)[range]

-        }

-

-        pub fn resize(&mut self, used: usize, additional: usize) -> bool {

-            let cap = self.buf.capacity();

-

-            assert!(used <= cap, "Cannot have used more than current capacity");

-

-            let old_ptr = self.buf.as_ptr();

-

-            if used == 0 {

-                *self = RawBuf::with_capacity(

-                    cap.checked_add(additional)

-                        .expect("overflow evalutating additional capacity")

-                );

-

-                return false;

-            }

-

-            self.buf.reserve_exact(additional);

-

-            unsafe {

-                let new_cap = self.len();

-                self.buf.set_len(new_cap);

-            }

-

-            old_ptr == self.buf.as_ptr()

-        }

-

-        pub fn into_vec(self) -> Vec<u8> {

-            self.buf

-        }

-    }

-}

-

-#[cfg(feature = "nightly")]

-mod impl_ {

-    extern crate alloc;

-

-    use self::alloc::raw_vec::RawVec;

-

-    use std::slice;

-    use std::ops::{Index, IndexMut};

-

-    pub struct RawBuf {

-        buf: RawVec<u8>,

-    }

-

-    impl RawBuf {

-        pub fn with_capacity(capacity: usize) -> Self {

-            RawBuf {

-                buf: RawVec::with_capacity(capacity)

-            }

-        }

-

-        pub fn len(&self) -> usize {

-            self.buf.cap()

-        }

-

-        pub fn get(&self) -> &[u8] {

-            unsafe {

-                slice::from_raw_parts(self.buf.ptr(), self.len())

-            }

-        }

-

-        pub fn get_mut(&mut self) -> &mut [u8] {

-            unsafe {

-                slice::from_raw_parts_mut(self.buf.ptr(), self.len())

-            }

-        }

-

-        pub fn slice<R>(&self, range: R) -> &<[u8] as Index<R>>::Output

-        where [u8]: Index<R> {

-            &self.get()[range]

-        }

-

-        pub fn slice_mut<R>(&mut self, range: R) -> &mut <[u8] as Index<R>>::Output

-        where [u8]: IndexMut<R> {

-            &mut self.get_mut()[range]

-        }

-

-        pub fn resize(&mut self, used: usize, additional: usize) -> bool {

-            let cap = self.len();

-

-            assert!(used <= cap, "Cannot have used more than current capacity");

-

-            if !self.buf.reserve_in_place(cap, additional) {

-                let old_ptr = self.buf.ptr();

-

-                if used == 0 {

-                    // Free the old buf and alloc a new one so the allocator doesn't

-                    // bother copying bytes we no longer care about

-                    self.buf = RawVec::with_capacity(

-                        cap.checked_add(additional)

-                            .expect("Overflow evaluating additional capacity")

-                    );

-                } else {

-                    self.buf.reserve_exact(cap, additional);

-                }

-

-                return old_ptr == self.buf.ptr();

-            }

-

-            true

-        }

-

-        pub fn into_vec(self) -> Vec<u8> {

-            unsafe {

-                self.buf.into_box().into_vec()

-            }

-        }

-    }

-}

diff --git a/rustc_deps/vendor/buf_redux/src/ringbuf_tests.rs b/rustc_deps/vendor/buf_redux/src/ringbuf_tests.rs
new file mode 100644
index 0000000..3501e19
--- /dev/null
+++ b/rustc_deps/vendor/buf_redux/src/ringbuf_tests.rs
@@ -0,0 +1,287 @@
+// Original implementation Copyright 2013 The Rust Project Developers <https://github.com/rust-lang>

+//

+// Original source file: https://github.com/rust-lang/rust/blob/master/src/libstd/io/buffered.rs

+//

+// Modifications copyright 2018 Austin Bonander <austin.bonander@gmail.com>

+//

+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

+// option. This file may not be copied, modified, or distributed

+// except according to those terms.

+

+//! Tests checking `Buffer::new_ringbuf()` and friends.

+//!

+//! Some may be adapted from rust/src/libstd/io/buffered.rs

+//!

+//! Since `SliceDeque` rounds allocations up to the page size or larger, these cannot assume

+//! a small capacity like `std_test` does.

+

+// TODO: add tests centered around the mirrored buf boundary

+

+use std::io::prelude::*;

+use std::io::{self, SeekFrom};

+

+use {Buffer, BufReader, DEFAULT_BUF_SIZE};

+

+use std_tests::ShortReader;

+

+macro_rules! assert_capacity {

+    ($buf:expr, $cap:expr) => {

+        let cap = $buf.capacity();

+            if cfg!(windows) {

+            // Windows' minimum allocation size is 64K

+            assert_eq!(cap, ::std::cmp::max(64 * 1024, cap));

+        } else {

+            assert_eq!(cap, $cap);

+        }

+    }

+}

+

+#[test]

+fn test_buffer_new() {

+    let buf = Buffer::new_ringbuf();

+    assert_capacity!(buf, DEFAULT_BUF_SIZE);

+    assert_eq!(buf.capacity(), buf.usable_space());

+}

+

+#[test]

+fn test_buffer_with_cap() {

+    let buf = Buffer::with_capacity_ringbuf(4 * 1024);

+    assert_capacity!(buf, 4 * 1024);

+

+    // test rounding up to page size

+    let buf = Buffer::with_capacity_ringbuf(64);

+    assert_capacity!(buf, 4 * 1024);

+    assert_eq!(buf.capacity(), buf.usable_space());

+}

+

+#[test]

+fn test_buffered_reader() {

+    let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];

+    let mut reader = BufReader::new_ringbuf(inner);

+

+    let mut buf = [0, 0, 0];

+    let nread = reader.read(&mut buf);

+    assert_eq!(nread.unwrap(), 3);

+    let b: &[_] = &[5, 6, 7];

+    assert_eq!(buf, b);

+

+    let mut buf = [0, 0];

+    let nread = reader.read(&mut buf);

+    assert_eq!(nread.unwrap(), 2);

+    let b: &[_] = &[0, 1];

+    assert_eq!(buf, b);

+

+    let mut buf = [0];

+    let nread = reader.read(&mut buf);

+    assert_eq!(nread.unwrap(), 1);

+    let b: &[_] = &[2];

+    assert_eq!(buf, b);

+

+    let mut buf = [0, 0, 0];

+    let nread = reader.read(&mut buf);

+    assert_eq!(nread.unwrap(), 2);

+    let b: &[_] = &[3, 4, 0];

+    assert_eq!(buf, b);

+

+    assert_eq!(reader.read(&mut buf).unwrap(), 0);

+}

+

+#[test]

+fn test_buffered_reader_seek() {

+    let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];

+    let mut reader = BufReader::new_ringbuf(io::Cursor::new(inner));

+

+    assert_eq!(reader.seek(SeekFrom::Start(3)).ok(), Some(3));

+    assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2, 3, 4][..]));

+    assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(3));

+    assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2, 3, 4][..]));

+    assert_eq!(reader.seek(SeekFrom::Current(1)).ok(), Some(4));

+    assert_eq!(reader.fill_buf().ok(), Some(&[1, 2, 3, 4][..]));

+    reader.consume(1);

+    assert_eq!(reader.seek(SeekFrom::Current(-2)).ok(), Some(3));

+    assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2, 3, 4][..]));

+}

+

+#[test]

+fn test_buffered_reader_seek_underflow() {

+    // gimmick reader that yields its position modulo 256 for each byte

+    struct PositionReader {

+        pos: u64

+    }

+    impl Read for PositionReader {

+        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {

+            let len = buf.len();

+            for x in buf {

+                *x = self.pos as u8;

+                self.pos = self.pos.wrapping_add(1);

+            }

+            Ok(len)

+        }

+    }

+    impl Seek for PositionReader {

+        fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {

+            match pos {

+                SeekFrom::Start(n) => {

+                    self.pos = n;

+                }

+                SeekFrom::Current(n) => {

+                    self.pos = self.pos.wrapping_add(n as u64);

+                }

+                SeekFrom::End(n) => {

+                    self.pos = u64::max_value().wrapping_add(n as u64);

+                }

+            }

+            Ok(self.pos)

+        }

+    }

+

+    let mut reader = BufReader::with_capacity(5, PositionReader { pos: 0 });

+    assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2, 3, 4][..]));

+    assert_eq!(reader.seek(SeekFrom::End(-5)).ok(), Some(u64::max_value()-5));

+    assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5));

+    // the following seek will require two underlying seeks

+    let expected = 9223372036854775802;

+    assert_eq!(reader.seek(SeekFrom::Current(i64::min_value())).ok(), Some(expected));

+    assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5));

+    // seeking to 0 should empty the buffer.

+    assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(expected));

+    assert_eq!(reader.get_ref().pos, expected);

+}

+

+#[test]

+fn test_read_until() {

+    let inner: &[u8] = &[0, 1, 2, 1, 0];

+    let mut reader = BufReader::with_capacity(2, inner);

+    let mut v = Vec::new();

+    reader.read_until(0, &mut v).unwrap();

+    assert_eq!(v, [0]);

+    v.truncate(0);

+    reader.read_until(2, &mut v).unwrap();

+    assert_eq!(v, [1, 2]);

+    v.truncate(0);

+    reader.read_until(1, &mut v).unwrap();

+    assert_eq!(v, [1]);

+    v.truncate(0);

+    reader.read_until(8, &mut v).unwrap();

+    assert_eq!(v, [0]);

+    v.truncate(0);

+    reader.read_until(9, &mut v).unwrap();

+    assert_eq!(v, []);

+}

+

+#[test]

+fn test_read_line() {

+    let in_buf: &[u8] = b"a\nb\nc";

+    let mut reader = BufReader::with_capacity(2, in_buf);

+    let mut s = String::new();

+    reader.read_line(&mut s).unwrap();

+    assert_eq!(s, "a\n");

+    s.truncate(0);

+    reader.read_line(&mut s).unwrap();

+    assert_eq!(s, "b\n");

+    s.truncate(0);

+    reader.read_line(&mut s).unwrap();

+    assert_eq!(s, "c");

+    s.truncate(0);

+    reader.read_line(&mut s).unwrap();

+    assert_eq!(s, "");

+}

+

+#[test]

+fn test_lines() {

+    let in_buf: &[u8] = b"a\nb\nc";

+    let reader = BufReader::with_capacity(2, in_buf);

+    let mut it = reader.lines();

+    assert_eq!(it.next().unwrap().unwrap(), "a".to_string());

+    assert_eq!(it.next().unwrap().unwrap(), "b".to_string());

+    assert_eq!(it.next().unwrap().unwrap(), "c".to_string());

+    assert!(it.next().is_none());

+}

+

+#[test]

+fn test_short_reads() {

+    let inner = ShortReader{lengths: vec![0, 1, 2, 0, 1, 0]};

+    let mut reader = BufReader::new(inner);

+    let mut buf = [0, 0];

+    assert_eq!(reader.read(&mut buf).unwrap(), 0);

+    assert_eq!(reader.read(&mut buf).unwrap(), 1);

+    assert_eq!(reader.read(&mut buf).unwrap(), 2);

+    assert_eq!(reader.read(&mut buf).unwrap(), 0);

+    assert_eq!(reader.read(&mut buf).unwrap(), 1);

+    assert_eq!(reader.read(&mut buf).unwrap(), 0);

+    assert_eq!(reader.read(&mut buf).unwrap(), 0);

+}

+

+#[cfg(feature = "nightly")]

+#[test]

+fn read_char_buffered() {

+    let buf = [195, 159];

+    let reader = BufReader::with_capacity(1, &buf[..]);

+    assert_eq!(reader.chars().next().unwrap().unwrap(), 'ß');

+}

+

+#[cfg(feature = "nightly")]

+#[test]

+fn test_chars() {

+    let buf = [195, 159, b'a'];

+    let reader = BufReader::with_capacity(1, &buf[..]);

+    let mut it = reader.chars();

+    assert_eq!(it.next().unwrap().unwrap(), 'ß');

+    assert_eq!(it.next().unwrap().unwrap(), 'a');

+    assert!(it.next().is_none());

+}

+

+/// Test that the ringbuffer wraps as intended

+#[test]

+fn test_mirror_boundary() {

+    // pretends the given bytes have been read

+    struct FakeReader(usize);

+

+    impl Read for FakeReader {

+        fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {

+            Ok(self.0)

+        }

+    }

+

+    let mut buffer = Buffer::new_ringbuf();

+    let cap = buffer.capacity();

+

+    // declaring these as variables for sanity

+    let read_amt = cap; // fill the buffer

+    let test_slice = &[1, 2, 3, 4, 5];

+    let consume_amt = read_amt - 5; // leave several bytes on the head side of the mirror

+

+    assert_eq!(buffer.read_from(&mut FakeReader(read_amt)).unwrap(), read_amt);

+    assert_eq!(buffer.usable_space(), cap - read_amt); // should be 0

+    assert_eq!(buffer.read_from(&mut FakeReader(read_amt)).unwrap(), 0); // buffer is full

+    buffer.consume(consume_amt);

+    assert_eq!(buffer.usable_space(), consume_amt);

+    assert_eq!(buffer.copy_from_slice(test_slice), test_slice.len());

+

+    // zeroes are the bytes we didn't consume

+    assert_eq!(buffer.buf(), &[0, 0, 0, 0, 0, 1, 2, 3, 4, 5]);

+    buffer.clear();

+    assert_eq!(buffer.usable_space(), cap);

+}

+

+#[test]

+fn issue_8(){

+    let source = vec![0u8; 4096*4];

+

+    let mut rdr = BufReader::with_capacity_ringbuf(4096, source.as_slice());

+

+    loop {

+        let n = rdr.read_into_buf().unwrap();

+        if n == 0 {

+            break;

+        }

+        rdr.consume(4000);

+        // rdr.make_room(); // (only necessary with 'standard' reader)

+

+        println!("{}", n);

+    }

+}

+

+// `BufWriter` doesn't utilize a ringbuffer

diff --git a/rustc_deps/vendor/buf_redux/src/std_tests.rs b/rustc_deps/vendor/buf_redux/src/std_tests.rs
index 6e1ef00..aebe929 100644
--- a/rustc_deps/vendor/buf_redux/src/std_tests.rs
+++ b/rustc_deps/vendor/buf_redux/src/std_tests.rs
@@ -2,7 +2,7 @@
 //

 // Original source file: https://github.com/rust-lang/rust/blob/master/src/libstd/io/buffered.rs

 //

-// Additions copyright 2016 Austin Bonander <austin.bonander@gmail.com>

+// Modifications copyright 2016-2018 Austin Bonander <austin.bonander@gmail.com>

 //

 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

@@ -10,13 +10,16 @@
 // option. This file may not be copied, modified, or distributed

 // except according to those terms.

 

+//! These tests are copied from rust/src/libstd/io/buffered.rs

+//! They assume exact capacity allocation

+

 use std::io::prelude::*;

 use std::io::{self, SeekFrom};

-use super::{BufReader, BufWriter};

+use {BufReader, BufWriter, LineWriter};

 

 /// A dummy reader intended at testing short-reads propagation.

 pub struct ShortReader {

-    lengths: Vec<usize>,

+    pub lengths: Vec<usize>,

 }

 

 impl Read for ShortReader {

@@ -267,3 +270,45 @@
     w.write_all(&[8, 9]).unwrap();

     assert_eq!(&w.into_inner().unwrap().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]);

 }

+

+#[test]

+fn test_line_buffer() {

+    let mut writer = LineWriter::new(Vec::new());

+    writer.write(&[0]).unwrap();

+    assert_eq!(*writer.get_ref(), []);

+    writer.write(&[1]).unwrap();

+    assert_eq!(*writer.get_ref(), []);

+    writer.flush().unwrap();

+    assert_eq!(*writer.get_ref(), [0, 1]);

+    writer.write(&[0, b'\n', 1, b'\n', 2]).unwrap();

+    assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n']);

+    writer.flush().unwrap();

+    assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2]);

+    writer.write(&[3, b'\n']).unwrap();

+    assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2, 3, b'\n']);

+}

+

+#[test]

+fn test_buf_writer_drops_once() {

+    struct CountDrops(usize);

+

+    impl Write for CountDrops {

+        fn write(&mut self, _buf: &[u8]) -> io::Result<usize> {

+            unimplemented!()

+        }

+

+        fn flush(&mut self) -> io::Result<()> {

+            unimplemented!()

+        }

+    }

+

+    impl Drop for CountDrops {

+        fn drop(&mut self) {

+            assert_eq!(self.0, 0);

+            self.0 += 1;

+        }

+    }

+

+    let writer = BufWriter::new(CountDrops(0));

+    let (_, _) = writer.into_inner_with_buffer();

+}

diff --git a/rustc_deps/vendor/buf_redux/src/strategy.rs b/rustc_deps/vendor/buf_redux/src/strategy.rs
deleted file mode 100644
index 07beace..0000000
--- a/rustc_deps/vendor/buf_redux/src/strategy.rs
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2016 Austin Bonander <austin.bonander@gmail.com>

-//

-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

-// option. This file may not be copied, modified, or distributed

-// except according to those terms.

-//! Types which can be used to tune the behavior of `BufReader`.

-//!

-//! Some simple strategies are provided for your convenience. You may prefer to create your own

-//! types and implement the traits for them instead.

-

-use super::Buffer;

-

-use std::fmt;

-

-/// The default `ReadStrategy` for this crate.

-pub type DefaultReadStrategy = IfEmpty;

-/// The default `MoveStrategy` for this crate.

-pub type DefaultMoveStrategy = AtEndLessThan1k;

-/// The default `FlushStrategy` for this crate.

-pub type DefaultFlushStrategy = WhenFull;

-

-/// Trait for types which `BufReader` can consult to determine when it should read more data into the

-/// buffer.

-pub trait ReadStrategy: Default + fmt::Debug {

-    /// Returns `true` if the buffer should read more data, `false` otherwise.

-    fn should_read(&self, buffer: &Buffer) -> bool;

-}

-

-/// A `ReadStrategy` which tells the buffer to read more data only when empty.

-///

-/// Default behavior of `std::io::BufReader`.

-#[derive(Debug, Default)]

-pub struct IfEmpty;

-

-impl ReadStrategy for IfEmpty {

-    #[inline]

-    fn should_read(&self, buffer: &Buffer) -> bool {

-        buffer.buffered() == 0

-    }

-}

-

-/// A `ReadStrategy` which returns `true` if there is fewer bytes in the buffer

-/// than the provided value.

-#[derive(Debug, Default)]

-pub struct LessThan(pub usize);

-

-impl ReadStrategy for LessThan { 

-    fn should_read(&self, buffer: &Buffer) -> bool { 

-        buffer.buffered() < self.0

-    }

-}

-

-/// Trait for types which `BufReader` can consult to determine when it should move data

-/// to the beginning of the buffer.

-///

-/// **Note**: If the buffer is empty, the next read will start at the beginning of the buffer

-/// regardless of the provided strategy.

-pub trait MoveStrategy: Default + fmt::Debug {

-    /// Returns `true` if the buffer should move the data down to the beginning, 

-    /// `false` otherwise.

-    fn should_move(&self, buffer: &Buffer) -> bool;

-}

-

-/// A `MoveStrategy` which tells the buffer to move data if there is no more room at the end

-/// of the buffer, *and* if there is less than **1 KiB** of valid data in the buffer.

-///

-/// This avoids excessively large copies while still making room for more reads when appropriate.

-///

-/// Use the `AtEndLessThan` type to set a different threshold.

-#[derive(Debug, Default)]

-pub struct AtEndLessThan1k;

-

-impl MoveStrategy for AtEndLessThan1k { 

-    #[inline]

-    fn should_move(&self, buffer: &Buffer) -> bool { 

-        buffer.headroom() == 0 && buffer.buffered() < 1024

-    }

-}

-

-/// A `MoveStrategy` which triggers if there is no more room at the end of the buffer,

-/// *and* there are fewer valid bytes in the buffer than the provided value.

-///

-/// `AtEndLessThan(1)` is equivalent to `AtEnd`.

-/// `AtEndLessThan(1024)` is equivalent to `AtEndLessThan1k`.

-#[derive(Debug, Default)]

-pub struct AtEndLessThan(pub usize);

-

-impl MoveStrategy for AtEndLessThan { 

-    fn should_move(&self, buffer: &Buffer) -> bool {

-        buffer.headroom() == 0 && buffer.buffered() < self.0

-    }

-}

-

-/// A `MoveStrategy` which always returns `false`. Use this to restore original

-/// `std::io::BufReader` behavior.

-#[derive(Debug, Default)]

-pub struct NeverMove;

-

-impl MoveStrategy for NeverMove {

-    #[inline]

-    fn should_move(&self, _: &Buffer) -> bool {

-        false

-    }

-}

-

-/// A trait which tells `BufWriter` when to flush.

-pub trait FlushStrategy: Default + fmt::Debug {

-    /// Return `true` if the buffer should be flushed before reading into it.

-    ///

-    /// The buffer is provided, as well as `incoming` which is

-    /// the size of the buffer that will be written to the `BufWriter`.

-    fn flush_before(&self, _buf: &Buffer, _incoming: usize) -> bool;

-

-    /// Return `true` if the buffer should be flushed after reading into it.

-    ///

-    /// `buf` references the updated buffer after the read.

-    ///

-    /// Default impl is a no-op.

-    fn flush_after(&self, _buf: &Buffer) -> bool {

-        false

-    }

-}

-

-/// Flush the buffer if there is no more headroom. Equivalent to the behavior or

-/// `std::io::BufWriter`.

-#[derive(Debug, Default)]

-pub struct WhenFull;

-

-impl FlushStrategy for WhenFull {

-    fn flush_before(&self, buf: &Buffer, incoming: usize) -> bool {

-        buf.headroom() < incoming

-    }

-}

-

-/// Flush the buffer if it contains at least the given number of bytes.

-#[derive(Debug, Default)]

-pub struct FlushAtLeast(pub usize);

-

-impl FlushStrategy for FlushAtLeast {

-    fn flush_before(&self, buf: &Buffer, _: usize) -> bool {

-        buf.buffered() > self.0

-    }

-}

-

-/// Flush the buffer if it contains at least `8Kb (8192b)`.

-#[derive(Debug, Default)]

-pub struct FlushAtLeast8k;

-

-impl FlushStrategy for FlushAtLeast8k {

-    fn flush_before(&self, buf: &Buffer, _: usize) -> bool {

-        buf.buffered() > 8192

-    }

-}

-

-/// Flush the buffer if it contains the given byte.

-///

-/// Only scans the buffer after reading. Searches from the end first.

-#[derive(Debug, Default)]

-pub struct FlushOn(pub u8);

-

-impl FlushStrategy for FlushOn {

-    /// Same as `WhenFull`.

-    fn flush_before(&self, buf: &Buffer, incoming: usize) -> bool {

-        buf.headroom() < incoming

-    }

-

-    fn flush_after(&self, buf: &Buffer) -> bool {

-        ::memchr::memrchr(self.0, buf.buf()).is_some()

-    }

-}

-

-/// Flush the buffer if it contains a newline (`\n`).

-///

-/// Equivalent to `FlushOn(b'\n')`.

-#[derive(Debug, Default)]

-pub struct FlushOnNewline;

-

-impl FlushStrategy for FlushOnNewline {

-    /// Same as `WhenFull`.

-    fn flush_before(&self, buf: &Buffer, incoming: usize) -> bool {

-        buf.headroom() < incoming

-    }

-

-    fn flush_after(&self, buf: &Buffer) -> bool {

-        ::memchr::memrchr(b'\n', buf.buf()).is_some()

-    }

-}

diff --git a/rustc_deps/vendor/buf_redux/src/tests.rs b/rustc_deps/vendor/buf_redux/src/tests.rs
deleted file mode 100644
index 1189d8f..0000000
--- a/rustc_deps/vendor/buf_redux/src/tests.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2016 Austin Bonander <austin.bonander@gmail.com>

-//

-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or

-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license

-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your

-// option. This file may not be copied, modified, or distributed

-// except according to those terms.

-

-use super::Buffer;

-

-use std::io::Cursor;

-

-#[test]

-fn read_into_full() {

-    let mut buffer = Buffer::with_capacity(1);

-

-    assert_eq!(buffer.capacity(), 1);

-

-    let mut bytes = Cursor::new([1u8, 2]);

-

-    // Result<usize, io::Error> does not impl PartialEq

-    assert_eq!(buffer.read_from(&mut bytes).unwrap(), 1);

-    assert_eq!(buffer.read_from(&mut bytes).unwrap(), 0);

-}
\ No newline at end of file
diff --git a/rustc_deps/vendor/multipart/.cargo-checksum.json b/rustc_deps/vendor/multipart/.cargo-checksum.json
index 47b1718..66e7aff 100644
--- a/rustc_deps/vendor/multipart/.cargo-checksum.json
+++ b/rustc_deps/vendor/multipart/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"ed672d728ae0a4f81fedafd7d458b3320724baef61b26d06c067f84867f97156","LICENSE":"01bec9735cfa1b63c23626da1201dde1ab7afd6a43e83fad3a40a27bbf89b19d","LICENSE-APACHE":"7cfd738c53d61c79f07e348f622bf7707c9084237054d37fbe07788a75f5881c","LICENSE-MIT":"64e1c5f067ee2d8c0ee8abd751e57275d4e97e4f7c05699bc23f5005a9c53043","README.md":"23ce2b66a090b5d3c00b658811a3ed9fb4f8fad5562fdd2e0815b54da98275eb","examples/README.md":"271a17c60165e1886ecbc9f9a7d51ae78128ba59bec980250bded5a8bcaec66e","examples/hyper_client.rs":"48ff35f0d9bbd881f826733d8bbf6bee39dac287dfb0aff3de143e9cdda49a4e","examples/hyper_reqbuilder.rs":"06c78f1f14522445c1cfe7360b883c00a48fc8a2ed45b58a7070923b84f81c7a","examples/hyper_server.rs":"e7e32c274113eeca259812ad15628b1142341518393bdd9494642652cc306dab","examples/iron.rs":"6eed42b670b3b46609ee7838634a77a2f9d2062f0ac7d4a9ef220adcf0c41913","examples/iron_intercept.rs":"15283d0ea5070e01b6f9e1daaf1d05de3cac7b7a412296c6387565d20d322845","examples/nickel.rs":"e42840aedcbb17d435cd197572f5bcc08997afd0e1a35138bc3eb0430235373f","examples/tiny_http.rs":"3fcedab3ed26cb8b58adbcc243c7eedd9c89e72ca946aea7f76b078f55f17885","fuzz_server.sh":"e20ba929718a50c3a9b3996ae478d371ee0f7c3113f899934c3c834724456db2","lorem_ipsum.txt":"abba5a928dd1e4ac2f405b357bf33bd51a9a7b67709cfc1efeb5cd94d7e9f6b8","src/bin/test_multipart.rs":"e5853caa1cd253ea2b55edffc245c4067f7cb8e16dd9a7b67efce1b7e90d25b4","src/client/hyper.rs":"51ccf884128957dc9d24c70e5163010d05afe57044f6bd43c9c06a87d05029b4","src/client/lazy.rs":"0a0cf3737b4c256430038dd0fbd28466135302032b42f855fbaa58f26d306704","src/client/mod.rs":"feb9384b54b4c1d509cd12e082c34f76daf52b54bf3c65f1ee66200a7f9c5d55","src/client/sized.rs":"bdaa6a89004f0663566b1d52f71cb5d85b54979c82f9c254f44865aeb3e0b3a8","src/lib.rs":"0d1a94ae0375f8145d1c66b764b6941777307b548ae1e272af3f98ec34deccbd","src/local_test.rs":"79aeef0aaeec8cc7d988beb6af38e874bf2a792d4299ffadfd2f49390aa2e0b3","src/mock.rs":"586ae254fc1ed2ac2b2c227f5663330b1c33859c354a92ffb29e40
cbf6c8034e","src/server/boundary.rs":"ffd60513b6f5951f6a8e8bb720f696dd3f6a49a50c8e8a08f9a58a3eca9ca98f","src/server/field.rs":"6406fbeebb761369450f47577501795ee65e92595e7b9c56bd1a9150a2a24192","src/server/hyper.rs":"a8642f16e109eb172483b4c84026aa16d80706f09b451dcf42371c187347540f","src/server/iron.rs":"f05c9ed2b09045eeee3f690bb39faf1441ef2e01b1bfe62b765b33d5bb68147f","src/server/mod.rs":"a13f678bbf85ac0a21ab2cbf8b867ec6f9380d25b46741b3960636fbf5dc8467","src/server/save.rs":"d081203bf0335410abbf0ba7366b6e02883ed1f29b032cefc18f259a9213a82a","src/server/tiny_http.rs":"d0eeb763c126046ba8c6160ebc6ec9fa3b74e70481e429b09d7dc5aec06d8395"},"package":"92f54eb45230c3aa20864ccf0c277eeaeadcf5e437e91731db498dbf7fbe0ec6"}
\ No newline at end of file
+{"files":{"Cargo.toml":"b2afce51b8b5d35ea60c83b0abb354b26103b2922802de3088e2ab9e8ba2c8fa","LICENSE":"01bec9735cfa1b63c23626da1201dde1ab7afd6a43e83fad3a40a27bbf89b19d","LICENSE-APACHE":"7cfd738c53d61c79f07e348f622bf7707c9084237054d37fbe07788a75f5881c","LICENSE-MIT":"64e1c5f067ee2d8c0ee8abd751e57275d4e97e4f7c05699bc23f5005a9c53043","README.md":"2c328bdae0d5cf18d854f2b21afd15927503410df45a1b4746e0deda98cf7b1e","examples/README.md":"ade34d4cd132e5adc8f7895cd8dc62ecd6d41c0a3f531e7dc98320c799b8602e","examples/hyper_client.rs":"48ff35f0d9bbd881f826733d8bbf6bee39dac287dfb0aff3de143e9cdda49a4e","examples/hyper_reqbuilder.rs":"06c78f1f14522445c1cfe7360b883c00a48fc8a2ed45b58a7070923b84f81c7a","examples/hyper_server.rs":"a2fd8dca0843829f5e72ed9730ae64de87e1169858776493e2c74f20a7fdf82c","examples/iron.rs":"fe68b40101f6486863ea9a089aec46bd8db06e6c41f43e1a999463b8595d1e71","examples/iron_intercept.rs":"15283d0ea5070e01b6f9e1daaf1d05de3cac7b7a412296c6387565d20d322845","examples/nickel.rs":"ec55262c703e3e5cb3f888534f1dccfaa5460bfa03ab0357b13e03aaa94c03ea","examples/rocket.rs":"b2a0ae582547f18728771af19da9930f4727897b8adc5558abf134836002a822","examples/tiny_http.rs":"eb01f21113f4059b51ae7281d9d27b8c407132440599c9995446d9557185905c","fuzz_server.sh":"e20ba929718a50c3a9b3996ae478d371ee0f7c3113f899934c3c834724456db2","lorem_ipsum.txt":"abba5a928dd1e4ac2f405b357bf33bd51a9a7b67709cfc1efeb5cd94d7e9f6b8","src/bin/form_test.rs":"a1e73d72f4fdf4c3886011eec87759d6d5a3941cffbf01494f123c10e7e34b1c","src/bin/read_file.rs":"b23a57795a6290c030808d0872b51216faf099ee0cc9955008b39b7c352593fa","src/bin/test_form.html":"b9ae826b54501017c2913ad24739c8d88d757215ac6632b35528036d92f0ed58","src/client/hyper.rs":"e2235758428e52b3c4db8795d4146c4eec3029fe4ccbc4dd72a39c8dd6e91cc8","src/client/lazy.rs":"4d116626cdbd8905c59623dfec283b872732040367713173373d2cb0b012514b","src/client/mod.rs":"ec82ee42f34ef62629afa0b2f66de99be6c2325ee002028871f0277e08086558","src/client/sized.rs":"b60156398ba9ee447f709330eaa75a0f6682e
8cd25a3d8610ca9255f0c40c09e","src/lib.rs":"46f54317e0c2877a3afba744b3c13a9f3c53bdaefb1cb0cc46da7b41a5c8c6e9","src/local_test.rs":"760055749e47f9fbddf5de1d31003629e2fc4bd57c9c2e87f70c64bd54f9a69e","src/mock.rs":"cbb29652df2d99ec16ff9fb4579bbe0ab03ed30933c8e2993c4a39c912a9ce62","src/server/boundary.rs":"3c031531956139b64007ec2dc01d261c527364df1969f83b8153fcab89596a08","src/server/field.rs":"6bc15f374e3d1c7c751b43cdf4e405a959ea51cc58666868bd37c412a57c5f0d","src/server/hyper.rs":"ccf73b54683af40e1b81ecf9fc61258a299b03ee6ed619b0ffb0f53c72a36f36","src/server/iron.rs":"2068fd0edd15c2b6ba59c27071705e980e1ee99078f03c0c121e0cdf4cb41909","src/server/mod.rs":"a28b413dc451e3fc15c369f566b085e1419751301b97f3158a2c2d58904bdfb7","src/server/nickel.rs":"711fe5ffef197e14dfd3d2a9745db28e067293fa03e5edda030e54138431e556","src/server/save.rs":"d0590beab3aa29464a22605aeab101b77758734284a5b3d421741c34d620866e","src/server/tiny_http.rs":"e45bdbab4fda00cf60419fbb9fd5165ac58948eaf27a49de74ff6c56e420396d"},"package":"adba94490a79baf2d6a23eac897157047008272fa3eecb3373ae6377b91eca28"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/multipart/Cargo.toml b/rustc_deps/vendor/multipart/Cargo.toml
index 65dfa3a..e40c122 100644
--- a/rustc_deps/vendor/multipart/Cargo.toml
+++ b/rustc_deps/vendor/multipart/Cargo.toml
@@ -12,19 +12,62 @@
 
 [package]
 name = "multipart"
-version = "0.13.6"
+version = "0.15.4"
 authors = ["Austin Bonander <austin.bonander@gmail.com>"]
 description = "A backend-agnostic extension for HTTP libraries that provides support for POST multipart/form-data requests on both client and server."
 documentation = "http://docs.rs/multipart/"
+readme = "README.md"
 keywords = ["form-data", "hyper", "iron", "http", "upload"]
 license = "MIT OR Apache-2.0"
 repository = "http://github.com/abonander/multipart"
+
+[[bin]]
+name = "form_test"
+required-features = ["mock", "hyper", "server"]
+
+[[example]]
+name = "hyper_client"
+required-features = ["client", "mock", "hyper"]
+
+[[example]]
+name = "hyper_reqbuilder"
+required-features = ["client", "mock", "hyper"]
+
+[[example]]
+name = "hyper_server"
+required-features = ["mock", "hyper", "server"]
+
+[[example]]
+name = "iron"
+required-features = ["mock", "iron", "server"]
+
+[[example]]
+name = "iron_intercept"
+required-features = ["mock", "iron", "server"]
+
+[[example]]
+name = "nickel"
+required-features = ["mock", "nickel", "server"]
+
+[[example]]
+name = "tiny_http"
+required-features = ["mock", "tiny_http", "server"]
+
+[[example]]
+name = "rocket"
+required-features = ["mock", "rocket", "rocket_codegen", "server"]
+[dependencies.buf_redux]
+version = "0.8"
+optional = true
+default-features = false
+
 [dependencies.clippy]
 version = ">=0.0, <0.1"
 optional = true
 
-[dependencies.tempdir]
-version = ">=0.3.4"
+[dependencies.httparse]
+version = "1.2"
+optional = true
 
 [dependencies.hyper]
 version = ">=0.9, <0.11"
@@ -35,13 +78,8 @@
 version = ">=0.4,<0.7"
 optional = true
 
-[dependencies.buf_redux]
-version = "0.6"
-optional = true
-
-[dependencies.twoway]
-version = "0.1"
-optional = true
+[dependencies.log]
+version = "0.4"
 
 [dependencies.mime]
 version = "0.2"
@@ -49,32 +87,48 @@
 [dependencies.mime_guess]
 version = "1.8"
 
-[dependencies.rand]
-version = "0.3"
-
-[dependencies.log]
-version = "0.3"
-
-[dependencies.safemem]
-version = "0.2"
+[dependencies.nickel]
+version = ">=0.10.1"
 optional = true
 
-[dependencies.tiny_http]
-version = "0.5"
-optional = true
-
-[dependencies.httparse]
+[dependencies.quick-error]
 version = "1.2"
 optional = true
-[dev-dependencies.env_logger]
+
+[dependencies.rand]
+version = "0.4"
+
+[dependencies.rocket]
+version = "0.3"
+optional = true
+
+[dependencies.rocket_codegen]
+version = "0.3"
+optional = true
+
+[dependencies.safemem]
+version = "0.3"
+optional = true
+
+[dependencies.tempdir]
 version = "0.3"
 
+[dependencies.tiny_http]
+version = "0.6"
+optional = true
+
+[dependencies.twoway]
+version = "0.1"
+optional = true
+[dev-dependencies.env_logger]
+version = "0.5"
+
 [features]
-default = ["all"]
-mock = []
-sse4 = ["nightly", "twoway/pcmp"]
-nightly = []
 bench = []
-all = ["client", "server", "hyper", "iron", "tiny_http", "mock"]
 client = []
-server = ["buf_redux", "httparse", "safemem", "twoway"]
+default = ["client", "hyper", "iron", "mock", "nickel", "server", "tiny_http"]
+mock = []
+nightly = []
+server = ["buf_redux", "httparse", "quick-error", "safemem", "twoway"]
+sse4 = ["nightly", "twoway/pcmp"]
+use_arc_str = []
diff --git a/rustc_deps/vendor/multipart/README.md b/rustc_deps/vendor/multipart/README.md
index 158e529..ba2ec62 100644
--- a/rustc_deps/vendor/multipart/README.md
+++ b/rustc_deps/vendor/multipart/README.md
@@ -2,9 +2,13 @@
 

 Client- and server-side abstractions for HTTP file uploads (POST requests with  `Content-Type: multipart/form-data`).

 

-Supports several different HTTP crates.

+Supports several different (**sync**hronous API) HTTP crates. 

+**Async**hronous (i.e. `futures`-based) API support will be provided by [multipart-async].

 

-Minimum supported Rust version: 1.17.0

+Minimum supported Rust version: 1.22.1*

+* only `mock`, `client` and `server` features, only guaranteed to compile

+

+Fully tested Rust version: 1.26.1

 

 ### [Documentation](http://docs.rs/multipart/)

 

@@ -12,35 +16,58 @@
 

 Example files demonstrating how to use `multipart` with these crates are available under [`examples/`](examples).

 

-### [Hyper](http://hyper.rs) 

+### [Hyper ![](https://img.shields.io/crates/v/hyper.svg)](https://crates.io/crates/hyper) 

 via the `hyper` feature (enabled by default). 

 

+**Note: Hyper 0.9, 0.10 (synchronous API) only**; support for asynchronous APIs will be provided by [multipart-async].

+ 

 Client integration includes support for regular `hyper::client::Request` objects via `multipart::client::Multipart`, as well

 as integration with the new `hyper::Client` API via `multipart::client::lazy::Multipart` (new in 0.5).

 

 Server integration for `hyper::server::Request` via `multipart::server::Multipart`.

 

-### [Iron](http://ironframework.io) 

+### [Iron ![](https://img.shields.io/crates/v/iron.svg)](https://crates.io/crates/iron) 

 via the `iron` feature.

 

 Provides regular server-side integration with `iron::Request` via `multipart::server::Multipart`, 

 as well as a convenient `BeforeMiddleware` implementation in `multipart::server::iron::Intercept`.

 

-### [tiny\_http](https://crates.io/crates/tiny_http/)

+### [Nickel ![](https://img.shields.io/crates/v/nickel.svg)](https://crates.io/crates/nickel) <sup>returning to `multipart` in 0.14!</sup>

+via the `nickel` feature.

+

+Provides server-side integration with `&mut nickel::Request` via `multipart::server::Multipart`. 

+

+### [tiny_http ![](https://img.shields.io/crates/v/tiny_http.svg)](https://crates.io/crates/tiny_http)

 via the `tiny_http` feature.

 

 Provides server-side integration with `tiny_http::Request` via `multipart::server::Multipart`.

 

-### [Nickel](http://nickel.rs/) 

+### [Rocket ![](https://img.shields.io/crates/v/rocket.svg)](https://crates.io/crates/rocket)

 

-**Note**: Moved to `multipart-nickel` crate, see [nickel/examples/nickel.rs](nickel/examples/nickel.rs)

-for updated integration example.

+Direct integration is not provided as the Rocket folks seem to want to handle `multipart/form-data`

+behind the scenes which would supercede any integration with `multipart`. However, an example is available

+showing how to use `multipart` on a Rocket server: [examples/rocket.rs](examples/rocket.rs)

 

-Provides server-side integration with `&mut nickel::Request` via `multipart::server::Multipart`. 

+## ⚡ Powered By ⚡

+

+### [buf_redux ![](https://img.shields.io/crates/v/buf_redux.svg)](https://crates.io/crates/buf_redux)

+

+Customizable drop-in `std::io::BufReader` replacement, created to be used in this crate.

+Needed because it can read more bytes into the buffer without the buffer being empty, necessary

+when a boundary falls across two reads. (It was easier to author a new crate than try to get this added

+to `std::io::BufReader`.)

+

+### [httparse ![](https://img.shields.io/crates/v/httparse.svg)](https://crates.io/crates/httparse)

+

+Fast, zero-copy HTTP header parsing, used to read field headers in `multipart/form-data` request bodies.

+

+### [twoway ![](https://img.shields.io/crates/v/twoway.svg)](https://crates.io/crates/twoway)

+

+Fast string and byte-string search. Used to find boundaries in the request body. SSE 4.2 acceleration available

+under the `sse42` or `twoway/pcmp` features.

 

 ## License

 

-

 Licensed under either of

 

  * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)

@@ -53,3 +80,5 @@
 Unless you explicitly state otherwise, any contribution intentionally submitted

 for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any

 additional terms or conditions.

+

+[multipart-async]: https://github.com/abonander/multipart-async

diff --git a/rustc_deps/vendor/multipart/examples/README.md b/rustc_deps/vendor/multipart/examples/README.md
index 24224ed..61a3b32 100644
--- a/rustc_deps/vendor/multipart/examples/README.md
+++ b/rustc_deps/vendor/multipart/examples/README.md
@@ -5,13 +5,13 @@
 

 These files carry the same licenses as [`multipart` itself](https://github.com/abonander/multipart#license), though this may be lightened to a copyright-free license in the near future.

 

-##Client

+## Client

 

 Examples for the client-side integrations of `multipart`'s API.

 

 [`hyper_client`](hyper_client.rs)

 ---------------------------------

-Author: [abonander][abonander]

+Author: [abonander]

 

 This example showcases usage of `multipart` with the `hyper::client::Request` API.

 

@@ -21,7 +21,7 @@
 

 [`hyper_reqbuilder`](hyper_reqbuilder.rs)

 -----------------------------------------

-Author: [abonander][abonander]

+Author: [abonander]

 

 This example showcases usage of `multipart` with Hyper's new `Client` API,

 via the lazy-writing capabilities of `multipart::client::lazy`.

@@ -30,11 +30,11 @@
 $ cargo run --example hyper_reqbuilder

 ```

 

-##Server

+## Server

 

 [`hyper_server`](hyper_server.rs)

 ---------------------------------

-Author: [Puhrez][puhrez]

+Author: [Puhrez]

 

 This example shows how to use `multipart` with a [`hyper::Server`] (http://hyper.rs/) to intercept multipart requests.

 

@@ -44,7 +44,7 @@
 

 [`iron`](iron.rs)

 -----------------

-Author: [White-Oak][white-oak]

+Author: [White-Oak]

 

 This example shows how to use `multipart` with the [Iron web application framework](http://ironframework.io/), via `multipart`'s support

 for the `iron::Request` type.

@@ -57,7 +57,7 @@
 

 [`iron_intercept`](iron_intercept.rs)

 -------------------------------------

-Author: [abonander][abonander]

+Author: [abonander]

 

 This example shows how to use `multipart`'s specialized `Intercept` middleware with Iron, which reads out all fields and

 files to local storage so they can be accessed arbitrarily.

@@ -68,7 +68,7 @@
 

 [`tiny_http`](tiny_http.rs)

 ---------------------------

-Author: [White-Oak][white-oak]

+Author: [White-Oak]

 

 This example shows how to use `multipart` with the [`tiny_http` crate](https://crates.io/crates/tiny_http), via `multipart`'s support for the `tiny_http::Request` type.

 

@@ -78,7 +78,7 @@
 

 [`hyper_server`](hyper_server.rs)

 ---------------------------------

-Author: [Puhrez][puhrez]

+Author: [Puhrez]

 

 This example shows how to use `multipart` with a [`hyper::Server`] (http://hyper.rs/) to intercept multipart requests.

 

@@ -87,8 +87,8 @@
 ```

 

 [`nickel`](nickel.rs)

----------------------------------

-Author: [iamsebastian][iamsebastian]

+---------------------

+Author: [iamsebastian]

 

 This example shows how to use `multipart` to handle multipart uploads in [nickel.rs](https://nickel.rs).

 

@@ -96,9 +96,20 @@
 $ cargo run --example nickel --features nickel

 ```

 

+[Rocket](rocket.rs)

+-------------------

+Author: [abonander]

+

+This example shows how `multipart`'s server API can be used with [Rocket](https://rocket.rs) without

+explicit support (the Rocket folks seem to want to handle `multipart/form-data` behind the scenes

+but haven't gotten around to implementing it yet; this would supercede any integration from `multipart`). 

+

+```

+$ cargo run --example rocket --features "rocket,rocket_codegen"

+```

 

 [iamsebastian]: https://github.com/iamsebastian

-[puhrez]: https://github.com/puhrez

-[white-oak]: https://github.com/white-oak

+[Puhrez]: https://github.com/puhrez

+[White-Oak]: https://github.com/white-oak

 [abonander]: https://github.com/abonander

 

diff --git a/rustc_deps/vendor/multipart/examples/hyper_server.rs b/rustc_deps/vendor/multipart/examples/hyper_server.rs
index 6c1f91f..f6d5be2 100644
--- a/rustc_deps/vendor/multipart/examples/hyper_server.rs
+++ b/rustc_deps/vendor/multipart/examples/hyper_server.rs
@@ -1,13 +1,13 @@
 extern crate hyper;

 extern crate multipart;

 

-use std::fs::File;

-use std::io::{self, Read};

+use std::io;

 use hyper::server::{Handler, Server, Request, Response};

 use hyper::status::StatusCode;

 use hyper::server::response::Response as HyperResponse;

 use multipart::server::hyper::{Switch, MultipartHandler, HyperRequest};

-use multipart::server::{Multipart, Entries, SaveResult, SavedFile};

+use multipart::server::{Multipart, Entries, SaveResult};

+use multipart::mock::StdoutTee;

 

 struct NonMultipart;

 impl Handler for NonMultipart {

@@ -19,54 +19,26 @@
 

 struct EchoMultipart;

 impl MultipartHandler for EchoMultipart {

-    fn handle_multipart(&self, mut multipart: Multipart<HyperRequest>, mut res: HyperResponse) {

-        let processing = match multipart.save().temp() {

-            SaveResult::Full(entries) => process_entries(entries),

+    fn handle_multipart(&self, mut multipart: Multipart<HyperRequest>, res: HyperResponse) {

+        match multipart.save().temp() {

+            SaveResult::Full(entries) => process_entries(res, entries).unwrap(),

             SaveResult::Partial(entries, error) => {

                 println!("Errors saving multipart:\n{:?}", error);

-                process_entries(entries.into())

+                process_entries(res, entries.into()).unwrap();

             }

             SaveResult::Error(error) => {

                 println!("Errors saving multipart:\n{:?}", error);

-                Err(error)

+                res.send(format!("An error occurred {}", error).as_bytes()).unwrap();

             }

         };

-        match processing {

-            Ok(_) => res.send(b"All good in the hood :)\n").unwrap(),

-            Err(_) => {

-                *res.status_mut() = StatusCode::BadRequest;

-                res.send(b"An error occurred :(\n").unwrap();

-            }

-        }

     }

 }

 

-fn process_entries<'a>(entries: Entries) -> io::Result<()> {

-    for (name, field) in entries.fields {

-        println!("Field {:?}: {:?}", name, field);

-    }

-

-    for (name, files) in entries.files {

-        println!("Field {:?} has {} files:", name, files.len());

-

-        for file in files {

-            print_file(&file)?;

-        }

-    }

-

-    Ok(())

-}

-

-fn print_file(saved_file: &SavedFile) -> io::Result<()> {

-    let mut file = File::open(&saved_file.path)?;

-

-    let mut contents = String::new();

-    file.read_to_string(&mut contents)?;

-

-    println!("File {:?} ({:?}):", saved_file.filename, saved_file.content_type);

-    println!("{}", contents);

-

-    Ok(())

+fn process_entries(res: HyperResponse, entries: Entries) -> io::Result<()> {

+    let mut res = res.start()?;

+    let stdout = io::stdout();

+    let out = StdoutTee::new(&mut res, &stdout);

+    entries.write_debug(out)

 }

 

 fn main() {

diff --git a/rustc_deps/vendor/multipart/examples/iron.rs b/rustc_deps/vendor/multipart/examples/iron.rs
index 10d45c6..54c5fec 100644
--- a/rustc_deps/vendor/multipart/examples/iron.rs
+++ b/rustc_deps/vendor/multipart/examples/iron.rs
@@ -3,14 +3,14 @@
 

 extern crate env_logger;

 

-use std::fs::File;

-use std::io::Read;

-use multipart::server::{Multipart, Entries, SaveResult, SavedFile};

+use std::io::{self, Write};

+use multipart::mock::StdoutTee;

+use multipart::server::{Multipart, Entries, SaveResult};

 use iron::prelude::*;

 use iron::status;

 

 fn main() {

-    env_logger::init().unwrap();

+    env_logger::init();

 

     Iron::new(process_request).http("localhost:80").expect("Could not bind localhost:80");

 }

@@ -47,38 +47,20 @@
 /// Processes saved entries from multipart request.

 /// Returns an OK response or an error.

 fn process_entries(entries: Entries) -> IronResult<Response> {

-    for (name, field) in entries.fields {

-        println!("Field {:?}: {:?}", name, field);

+    let mut data = Vec::new();

+

+    {

+        let stdout = io::stdout();

+        let tee = StdoutTee::new(&mut data, &stdout);

+        entries.write_debug(tee).map_err(|e| {

+            IronError::new(

+                e,

+                (status::InternalServerError, "Error printing request fields")

+            )

+        })?;

     }

 

-    for (name, files) in entries.files {

-        println!("Field {:?} has {} files:", name, files.len());

+    let _ = writeln!(data, "Entries processed");

 

-        for file in files {

-            print_file(&file)?;

-        }

-    }

-

-    Ok(Response::with((status::Ok, "Multipart data is processed")))

-}

-

-fn print_file(saved_file: &SavedFile) -> IronResult<()> {

-    let mut file = match File::open(&saved_file.path) {

-        Ok(file) => file,

-        Err(error) => {

-            return Err(IronError::new(error,

-                                      (status::InternalServerError,

-                                       "Server couldn't open saved file")))

-        }

-    };

-

-    let mut contents = String::new();

-    if let Err(error) = file.read_to_string(&mut contents) {

-        return Err(IronError::new(error, (status::BadRequest, "The file was not a text")));

-    }

-

-    println!("File {:?} ({:?}):", saved_file.filename, saved_file.content_type);

-    println!("{}", contents);

-

-    Ok(())

+    Ok(Response::with((status::Ok, data)))

 }

diff --git a/rustc_deps/vendor/multipart/examples/nickel.rs b/rustc_deps/vendor/multipart/examples/nickel.rs
index 8909fda..52501e5 100644
--- a/rustc_deps/vendor/multipart/examples/nickel.rs
+++ b/rustc_deps/vendor/multipart/examples/nickel.rs
@@ -1,8 +1,63 @@
-//! **Note**: in-crate integration for Nickel was removed in 0.11.0;

-//! integration will be provided in the

-//! [`multipart-nickel`](https://crates.io/crates/multipart-nickel)

-//! crate for the foreseeable future.

-//!

-//! Please see `nickel/examples/nickel.rs` for the new integration.

+extern crate multipart;

+extern crate nickel;

 

-fn main() {}

+use std::io::{self, Write};

+use nickel::{Action, HttpRouter, MiddlewareResult, Nickel, Request, Response};

+use nickel::status::StatusCode;

+

+use multipart::server::nickel::MultipartBody;

+use multipart::server::{Entries, SaveResult};

+use multipart::mock::StdoutTee;

+

+fn handle_multipart<'mw>(req: &mut Request, mut res: Response<'mw>) -> MiddlewareResult<'mw> {

+    match (*req).multipart_body() {

+        Some(mut multipart) => {

+            match multipart.save().temp() {

+                SaveResult::Full(entries) => process_entries(res, entries),

+

+                SaveResult::Partial(entries, e) => {

+                    println!("Partial errors ... {:?}", e);

+                    return process_entries(res, entries.keep_partial());

+                },

+

+                SaveResult::Error(e) => {

+                    println!("There are errors in multipart POSTing ... {:?}", e);

+                    res.set(StatusCode::InternalServerError);

+                    return res.send(format!("Server could not handle multipart POST! {:?}", e));

+                },

+            }

+        }

+        None => {

+            res.set(StatusCode::BadRequest);

+            return res.send("Request seems not was a multipart request")

+        }

+    }

+}

+

+/// Processes saved entries from multipart request.

+/// Returns an OK response or an error.

+fn process_entries<'mw>(res: Response<'mw>, entries: Entries) -> MiddlewareResult<'mw> {

+    let stdout = io::stdout();

+    let mut res = res.start()?;

+    if let Err(e) = entries.write_debug(StdoutTee::new(&mut res, &stdout)) {

+        writeln!(res, "Error while reading entries: {}", e).expect("writeln");

+    }

+

+    Ok(Action::Halt(res))

+}

+

+fn main() {

+    let mut srv = Nickel::new();

+

+    srv.post("/multipart_upload/", handle_multipart);

+

+    // Start this example via:

+    //

+    // `cargo run --example nickel --features nickel`

+    //

+    // And - if you are in the root of this repository - do an example

+    // upload via:

+    //

+    // `curl -F file=@LICENSE 'http://localhost:6868/multipart_upload/'`

+    srv.listen("127.0.0.1:6868").expect("Failed to bind server");

+}

diff --git a/rustc_deps/vendor/multipart/examples/rocket.rs b/rustc_deps/vendor/multipart/examples/rocket.rs
new file mode 100644
index 0000000..af8aa58
--- /dev/null
+++ b/rustc_deps/vendor/multipart/examples/rocket.rs
@@ -0,0 +1,84 @@
+// Example usage with Rocket (https://rocket.rs)

+//

+// Direct integration is not provided at this time as it appears the Rocket folks would prefer

+// to handle multipart requests behind the scenes.

+#![feature(plugin)]

+#![plugin(rocket_codegen)]

+

+extern crate multipart;

+extern crate rocket;

+

+use multipart::mock::StdoutTee;

+use multipart::server::Multipart;

+use multipart::server::save::Entries;

+use multipart::server::save::SaveResult::*;

+

+use rocket::Data;

+use rocket::http::{ContentType, Status};

+use rocket::response::Stream;

+use rocket::response::status::Custom;

+

+use std::io::{self, Cursor, Write};

+

+#[post("/upload", data = "<data>")]

+// signature requires the request to have a `Content-Type`

+fn multipart_upload(cont_type: &ContentType, data: Data) -> Result<Stream<Cursor<Vec<u8>>>, Custom<String>> {

+    // this and the next check can be implemented as a request guard but it seems like just

+    // more boilerplate than necessary

+    if !cont_type.is_form_data() {

+        return Err(Custom(

+            Status::BadRequest,

+            "Content-Type not multipart/form-data".into()

+        ));

+    }

+

+    let (_, boundary) = cont_type.params().find(|&(k, _)| k == "boundary").ok_or_else(

+            || Custom(

+                Status::BadRequest,

+                "`Content-Type: multipart/form-data` boundary param not provided".into()

+            )

+        )?;

+

+    match process_upload(boundary, data) {

+        Ok(resp) => Ok(Stream::from(Cursor::new(resp))),

+        Err(err) => Err(Custom(Status::InternalServerError, err.to_string()))

+    }

+}

+

+fn process_upload(boundary: &str, data: Data) -> io::Result<Vec<u8>> {

+    let mut out = Vec::new();

+

+    // saves all fields, any field longer than 10kB goes to a temporary directory

+    // Entries could implement FromData though that would give zero control over

+    // how the files are saved; Multipart would be a good impl candidate though

+    match Multipart::with_body(data.open(), boundary).save().temp() {

+        Full(entries) => process_entries(entries, &mut out)?,

+        Partial(partial, reason) => {

+            writeln!(out, "Request partially processed: {:?}", reason)?;

+            if let Some(field) = partial.partial {

+                writeln!(out, "Stopped on field: {:?}", field.source.headers)?;

+            }

+

+            process_entries(partial.entries, &mut out)?

+        },

+        Error(e) => return Err(e),

+    }

+

+    Ok(out)

+}

+

+// having a streaming output would be nice; there's one for returning a `Read` impl

+// but not one that you can `write()` to

+fn process_entries(entries: Entries, mut out: &mut Vec<u8>) -> io::Result<()> {

+    {

+        let stdout = io::stdout();

+        let tee = StdoutTee::new(&mut out, &stdout);

+        entries.write_debug(tee)?;

+    }

+

+    writeln!(out, "Entries processed")

+}

+

+fn main() {

+    rocket::ignite().mount("/", routes![multipart_upload]).launch();

+}

diff --git a/rustc_deps/vendor/multipart/examples/tiny_http.rs b/rustc_deps/vendor/multipart/examples/tiny_http.rs
index ca20e05..f1159b5 100644
--- a/rustc_deps/vendor/multipart/examples/tiny_http.rs
+++ b/rustc_deps/vendor/multipart/examples/tiny_http.rs
@@ -1,9 +1,9 @@
 extern crate tiny_http;

 extern crate multipart;

 

-use std::fs::File;

-use std::io::{self, Read};

-use multipart::server::{Multipart, Entries, SaveResult, SavedFile};

+use std::io::{self, Cursor, Write};

+use multipart::server::{Multipart, Entries, SaveResult};

+use multipart::mock::StdoutTee;

 use tiny_http::{Response, StatusCode, Request};

 fn main() {

     // Starting a server on `localhost:80`

@@ -27,8 +27,10 @@
     }

 }

 

+type RespBody = Cursor<Vec<u8>>;

+

 /// Processes a request and returns response or an occured error.

-fn process_request<'a, 'b>(request: &'a mut Request) -> io::Result<Response<&'b [u8]>> {

+fn process_request(request: &mut Request) -> io::Result<Response<RespBody>> {

     // Getting a multipart reader wrapper

     match Multipart::from_request(request) {

         Ok(mut multipart) => {

@@ -51,40 +53,26 @@
 

 /// Processes saved entries from multipart request.

 /// Returns an OK response or an error.

-fn process_entries<'a>(entries: Entries) -> io::Result<Response<&'a [u8]>> {

-    for (name, field) in entries.fields {

-        println!("Field {:?}: {:?}", name, field);

+fn process_entries(entries: Entries) -> io::Result<Response<RespBody>> {

+    let mut data = Vec::new();

+

+    {

+        let stdout = io::stdout();

+        let tee = StdoutTee::new(&mut data, &stdout);

+        entries.write_debug(tee)?;

     }

 

-    for (name, files) in entries.files {

-        println!("Field {:?} has {} files:", name, files.len());

+    writeln!(data, "Entries processed")?;

 

-        for file in files {

-            print_file(&file)?;

-        }

-    }

-

-    Ok(build_response(200, "Multipart data is received!"))

+    Ok(build_response(200, data))

 }

 

-fn print_file(saved_file: &SavedFile) -> io::Result<()> {

-    let mut file = File::open(&saved_file.path)?;

-

-    let mut contents = String::new();

-    file.read_to_string(&mut contents)?;

-

-    println!("File {:?} ({:?}):", saved_file.filename, saved_file.content_type);

-    println!("{}", contents);

-

-    Ok(())

-}

-

-/// A utility function to build responses using only two arguments

-fn build_response(status_code: u16, response: &str) -> Response<&[u8]> {

-    let bytes = response.as_bytes();

+fn build_response<D: Into<Vec<u8>>>(status_code: u16, data: D) -> Response<RespBody> {

+    let data = data.into();

+    let data_len = data.len();

     Response::new(StatusCode(status_code),

                   vec![],

-                  bytes,

-                  Some(bytes.len()),

+                  Cursor::new(data),

+                  Some(data_len),

                   None)

 }

diff --git a/rustc_deps/vendor/multipart/src/bin/form_test.rs b/rustc_deps/vendor/multipart/src/bin/form_test.rs
new file mode 100644
index 0000000..8c11b1a
--- /dev/null
+++ b/rustc_deps/vendor/multipart/src/bin/form_test.rs
@@ -0,0 +1,32 @@
+extern crate hyper;

+extern crate multipart;

+

+use multipart::server::Multipart;

+

+use hyper::header::ContentType;

+use hyper::server::*;

+

+use std::fs::File;

+use std::io;

+

+fn main() {

+    let listening = Server::http("127.0.0.1:0").expect("failed to bind socket")

+        .handle(read_multipart).expect("failed to handle request");

+

+    println!("bound socket to: {}", listening.socket);

+}

+

+fn read_multipart(req: Request, mut resp: Response) {

+    if let Ok(mut multipart) = Multipart::from_request(req) {

+        multipart.foreach_entry(|_| {})

+            .map_err(|e| println!("error handling field: {}", e));

+    }

+

+    let mut file = File::open("src/bin/test_form.html")

+        .expect("failed to open src/bind/test_form.html");

+

+    resp.headers_mut().set(ContentType("text/html".parse().unwrap()));

+

+    let mut resp = resp.start().expect("failed to open response");

+    io::copy(&mut file, &mut resp).expect("failed to write response");

+}

diff --git a/rustc_deps/vendor/multipart/src/bin/test_multipart.rs b/rustc_deps/vendor/multipart/src/bin/read_file.rs
similarity index 76%
rename from rustc_deps/vendor/multipart/src/bin/test_multipart.rs
rename to rustc_deps/vendor/multipart/src/bin/read_file.rs
index 09dd7c2..3800849 100644
--- a/rustc_deps/vendor/multipart/src/bin/test_multipart.rs
+++ b/rustc_deps/vendor/multipart/src/bin/read_file.rs
@@ -2,8 +2,6 @@
 extern crate multipart;

 extern crate rand;

 

-use log::{LogRecord, LogMetadata, LogLevelFilter};

-

 use multipart::server::Multipart;

 

 use rand::{Rng, ThreadRng};

@@ -12,28 +10,29 @@
 use std::env;

 use std::io::{self, Read};

 

-const LOG_LEVEL: LogLevelFilter = LogLevelFilter::Debug;

+const LOG_LEVEL: log::LevelFilter = log::LevelFilter::Debug;

 

 struct SimpleLogger;

 

 impl log::Log for SimpleLogger {

-    fn enabled(&self, metadata: &LogMetadata) -> bool {

-        LOG_LEVEL.to_log_level()

+    fn enabled(&self, metadata: &log::Metadata) -> bool {

+        LOG_LEVEL.to_level()

             .map_or(false, |level| metadata.level() <= level)

     }

 

-    fn log(&self, record: &LogRecord) {

+    fn log(&self, record: &log::Record) {

         if self.enabled(record.metadata()) {

             println!("{} - {}", record.level(), record.args());

         }

     }

+    

+    fn flush(&self) {}

 }

 

+static LOGGER: SimpleLogger = SimpleLogger;

+

 fn main() {

-    log::set_logger(|max_log_level| {

-        max_log_level.set(LOG_LEVEL);

-        Box::new(SimpleLogger)

-    }).expect("Could not initialize logger");

+    log::set_logger(&LOGGER).expect("Could not initialize logger");

 

     let mut args = env::args().skip(1);

 

@@ -51,7 +50,7 @@
     let mut multipart = Multipart::with_body(reader, boundary);

 

     while let Some(field) = multipart.read_entry().unwrap() {

-        println!("Read field: {:?}", field.name);

+        println!("Read field: {:?}", field.headers.name);

     }

 

     println!("All entries read!");

@@ -73,4 +72,4 @@
 

         self.inner.read(&mut buf[..len])

     }

-}
\ No newline at end of file
+}

diff --git a/rustc_deps/vendor/multipart/src/bin/test_form.html b/rustc_deps/vendor/multipart/src/bin/test_form.html
new file mode 100644
index 0000000..4fb0a4e
--- /dev/null
+++ b/rustc_deps/vendor/multipart/src/bin/test_form.html
@@ -0,0 +1,14 @@
+<!DOCTYPE html>

+<html lang="en" xmlns="http://www.w3.org/1999/html">

+<head>

+    <meta charset="UTF-8">

+    <title>Multipart-Async Form Test</title>

+</head>

+<body>

+    <form method="post" enctype="multipart/form-data">

+        <!-- <input type = text name = "text_field" />

+        <input type = "file" name = "file" /> -->

+        <input type = "submit" />

+    </form>

+</body>

+</html>

diff --git a/rustc_deps/vendor/multipart/src/client/hyper.rs b/rustc_deps/vendor/multipart/src/client/hyper.rs
index e832883..c4e10ed 100644
--- a/rustc_deps/vendor/multipart/src/client/hyper.rs
+++ b/rustc_deps/vendor/multipart/src/client/hyper.rs
@@ -20,7 +20,7 @@
 

 use hyper::Error as HyperError;

 

-use mime::{Mime, TopLevel, SubLevel, Attr, Value};

+use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};

 

 use super::{HttpRequest, HttpStream};

 

diff --git a/rustc_deps/vendor/multipart/src/client/lazy.rs b/rustc_deps/vendor/multipart/src/client/lazy.rs
index 89cd33b..b9aa936 100644
--- a/rustc_deps/vendor/multipart/src/client/lazy.rs
+++ b/rustc_deps/vendor/multipart/src/client/lazy.rs
@@ -1,7 +1,4 @@
 //! Multipart requests which write out their data in one fell swoop.

-

-use log::LogLevel;

-

 use mime::Mime;

 

 use std::borrow::Cow;

@@ -11,9 +8,9 @@
 

 use std::io::prelude::*;

 use std::io::Cursor;

-use std::{fmt, io, mem};

+use std::{fmt, io};

 

-use super::{HttpRequest, HttpStream, MultipartWriter};

+use super::{HttpRequest, HttpStream};

 

 macro_rules! try_lazy (

     ($field:expr, $try:expr) => (

@@ -268,7 +265,7 @@
                     use_len = false;

 

                     streams.push(

-                        PreparedField::from_stream(&field.name, &boundary, stream.content_type,

+                        PreparedField::from_stream(&field.name, &boundary, &stream.content_type,

                                                    stream.filename.as_ref().map(|f| &**f),

                                                    stream.stream));

                 },

@@ -286,7 +283,7 @@
 

         Ok(PreparedFields {

             text_data: Cursor::new(text_data),

-            streams: streams,

+            streams,

             end_boundary: Cursor::new(boundary),

             content_len: if use_len { Some(content_len) } else { None } ,

         })

@@ -350,14 +347,14 @@
         let file = try_lazy!(name, File::open(path));

         let content_len = try_lazy!(name, file.metadata()).len();

 

-        let stream = Self::from_stream(&name, boundary, content_type, filename, Box::new(file));

+        let stream = Self::from_stream(&name, boundary, &content_type, filename, Box::new(file));

 

         let content_len = content_len + (stream.header.get_ref().len() as u64);

 

         Ok((stream, content_len))

     }

 

-    fn from_stream(name: &str, boundary: &str, content_type: Mime, filename: Option<&str>, stream: Box<Read + 'd>) -> Self {

+    fn from_stream(name: &str, boundary: &str, content_type: &Mime, filename: Option<&str>, stream: Box<Read + 'd>) -> Self {

         let mut header = Vec::new();

 

         write!(header, "{}\r\nContent-Disposition: form-data; name=\"{}\"",

@@ -371,7 +368,7 @@
 

         PreparedField {

             header: Cursor::new(header),

-            stream: stream

+            stream,

         }

     }

 }

@@ -397,14 +394,6 @@
     }

 }

 

-struct CowStrAsRef<'d>(Cow<'d, str>);

-

-impl<'d> AsRef<[u8]> for CowStrAsRef<'d> {

-    fn as_ref(&self) -> &[u8] {

-        self.0.as_bytes()

-    }

-}

-

 /// Conversion trait necessary for `Multipart::add_file()` to accept borrowed or owned strings

 /// and borrowed or owned paths

 pub trait IntoCowPath<'a> {

diff --git a/rustc_deps/vendor/multipart/src/client/mod.rs b/rustc_deps/vendor/multipart/src/client/mod.rs
index ca6fe6d..5af6fb3 100644
--- a/rustc_deps/vendor/multipart/src/client/mod.rs
+++ b/rustc_deps/vendor/multipart/src/client/mod.rs
@@ -198,7 +198,7 @@
 impl<'a, W: Write> MultipartWriter<'a, W> {

     fn new<B: Into<Cow<'a, str>>>(inner: W, boundary: B) -> Self {

         MultipartWriter {

-            inner: inner,

+            inner,

             boundary: boundary.into(),

             data_written: false,

         }

@@ -251,16 +251,13 @@
         }

     }

 

-    fn inner_mut(&mut self) -> &mut W {

-        &mut self.inner

-    }

-

     fn finish(mut self) -> io::Result<W> {

         if self.data_written {

-            // Write two hyphens after the last boundary occurrence.

-            write!(self.inner, "\r\n--{}--", self.boundary)?;

+            self.inner.write_all(b"\r\n")?;

         }

 

+        // always write the closing boundary, even for empty bodies

+        write!(self.inner, "--{}--", self.boundary)?;

         Ok(self.inner)

     }

 }

diff --git a/rustc_deps/vendor/multipart/src/client/sized.rs b/rustc_deps/vendor/multipart/src/client/sized.rs
index ef16e7b..a9c2a2a 100644
--- a/rustc_deps/vendor/multipart/src/client/sized.rs
+++ b/rustc_deps/vendor/multipart/src/client/sized.rs
@@ -85,6 +85,6 @@
 

         let mut req = self.inner.open_stream()?;

         io::copy(&mut &self.buffer[..], &mut req)?;

-        req.finish().into()

+        req.finish()

     }

 }

diff --git a/rustc_deps/vendor/multipart/src/lib.rs b/rustc_deps/vendor/multipart/src/lib.rs
index a30fd5e..c82a8ab 100644
--- a/rustc_deps/vendor/multipart/src/lib.rs
+++ b/rustc_deps/vendor/multipart/src/lib.rs
@@ -16,19 +16,26 @@
 //! * `mock`: Provides mock implementations of core `client` and `server` traits for debugging

 //! or non-standard use.

 //!

-//! * `hyper`: Integration with the [Hyper](https://github.com/hyperium/hyper) HTTP library

+//! * `hyper`: Integration with the [Hyper](https://crates.io/crates/hyper) HTTP library

 //! for client and/or server depending on which other feature flags are set.

 //!

-//! * `iron`: Integration with the [Iron](http://ironframework.io) web application

+//! * `iron`: Integration with the [Iron](http://crates.io/crates/iron) web application

 //! framework. See the [`server::iron`](server/iron/index.html) module for more information.

 //!

-//! * `tiny_http`: Integration with the [`tiny_http`](https://github.com/frewsxcv/tiny-http)

+//! * `nickel` (returning in 0.14!): Integration with the [Nickel](https://crates.io/crates/nickel)

+//! web application framework. See the [`server::nickel`](server/nickel/index.html) module for more

+//! information.

+//!

+//! * `tiny_http`: Integration with the [`tiny_http`](https://crates.io/crates/tiny_http)

 //! crate. See the [`server::tiny_http`](server/tiny_http/index.html) module for more information.

 //!

-//! **Note**: in-crate integration for Nickel was removed in 0.11.0;

-//! integration will be provided in the

-//! [`multipart-nickel`](https://crates.io/crates/multipart-nickel)

-//! crate for the foreseeable future.

+//! ### Note: Work in Progress

+//! I have left a number of Request-for-Comments (RFC) questions on various APIs and other places

+//! in the code as there are some cases where I'm not sure what the desirable behavior is.

+//!

+//! I have opened an issue as a place to collect responses and discussions for these questions

+//! [on Github](https://github.com/abonander/multipart/issues/96). Please quote the RFC-statement

+//! (and/or link to its source line) and provide your feedback there.

 #![cfg_attr(feature="clippy", feature(plugin))]

 #![cfg_attr(feature="clippy", plugin(clippy))]

 #![cfg_attr(feature="clippy", deny(clippy))]

@@ -38,16 +45,16 @@
 #[macro_use]

 extern crate log;

 

-#[cfg(test)]

-extern crate env_logger;

-

-#[cfg_attr(test, macro_use)]

+#[macro_use]

 extern crate mime;

-

 extern crate mime_guess;

 extern crate rand;

 extern crate tempdir;

 

+#[cfg(feature = "quick-error")]

+#[macro_use]

+extern crate quick_error;

+

 #[cfg(feature = "server")]

 extern crate safemem;

 

@@ -60,6 +67,9 @@
 #[cfg(feature = "tiny_http")]

 extern crate tiny_http;

 

+#[cfg(test)]

+extern crate env_logger;

+

 #[cfg(any(feature = "mock", test))]

 pub mod mock;

 

@@ -111,3 +121,8 @@
 fn random_alphanumeric(len: usize) -> String {

     rand::thread_rng().gen_ascii_chars().take(len).collect()

 }

+

+#[cfg(test)]

+fn init_log() {

+    let _ = env_logger::try_init();

+}
\ No newline at end of file
diff --git a/rustc_deps/vendor/multipart/src/local_test.rs b/rustc_deps/vendor/multipart/src/local_test.rs
index 45514c0..0bbbf82 100644
--- a/rustc_deps/vendor/multipart/src/local_test.rs
+++ b/rustc_deps/vendor/multipart/src/local_test.rs
@@ -6,13 +6,14 @@
 // copied, modified, or distributed except according to those terms.

 use mock::{ClientRequest, HttpBuffer};

 

-use server::{MultipartField, MultipartData, ReadEntry};

+use server::{MultipartField, ReadEntry, FieldHeaders};

 

-use mime::Mime;

+use mime::{self, Mime};

 

 use rand::{self, Rng};

 

 use std::collections::{HashMap, HashSet};

+use std::collections::hash_map::{Entry, OccupiedEntry};

 use std::fmt;

 use std::io::prelude::*;

 use std::io::Cursor;

@@ -39,65 +40,91 @@
     );

 );

 

+/// The error is provided as the `err` format argument

+macro_rules! expect_ok_fmt (

+    ($val:expr, $($args:tt)*) => (

+        match $val {

+            Ok(val) => val,

+            Err(e) => panic!($($args)*, err=e),

+        }

+    );

+);

+

+fn get_field<'m, V>(field: &FieldHeaders, fields: &'m mut HashMap<String, V>) -> Option<OccupiedEntry<'m, String, V>> {

+    match fields.entry(field.name.to_string()) {

+        Entry::Occupied(occupied) => Some(occupied),

+        Entry::Vacant(_) => None,

+    }

+}

+

 #[derive(Debug)]

 struct TestFields {

-    texts: HashMap<String, String>,

+    texts: HashMap<String, HashSet<String>>,

     files: HashMap<String, HashSet<FileEntry>>,

 }

 

 impl TestFields {

     fn gen() -> Self {

         TestFields {

-            texts: collect_rand(|| (gen_string(), gen_string())),

+            texts: collect_rand(|| (gen_string(), collect_rand(gen_string))),

             files: collect_rand(|| (gen_string(), FileEntry::gen_many())),

         }

     }

 

-    fn check_field<M: ReadEntry>(&mut self, field: &mut MultipartField<M>) {

-        match field.data {

-            MultipartData::Text(ref text) => {

-                let test_text = expect_fmt!(self.texts.remove(&field.name),

-                    "Got text field that wasn't in original dataset: {:?} : {:?} ",

-                    field.name, text.text

-                );

+    fn check_field<M: ReadEntry>(&mut self, mut field: MultipartField<M>) -> M {

+        // text/plain fields would be considered a file by `TestFields`

+        if field.headers.content_type.is_none() {

+            let mut text_entries = expect_fmt!(get_field(&field.headers, &mut self.texts),

+                                        "Got text field that wasn't in original dataset: {:?}",

+                                        field.headers);

 

-                assert!(

-                    text.text == test_text,

-                    "Unexpected data for field {:?}: Expected {:?}, got {:?}",

-                    field.name, test_text, text.text

-                );

-            },

-            MultipartData::File(ref mut file) => {

-                let mut bytes = Vec::with_capacity(MAX_LEN);

-                file.read_to_end(&mut bytes).unwrap();

+            let mut text = String::new();

+            expect_ok_fmt!(

+                field.data.read_to_string(&mut text),

+                "error failed to read text data to string: {:?}\n{err}", field.headers

+            );

 

-                let curr_file = FileEntry {

-                    content_type: file.content_type.clone(),

-                    filename: file.filename.take(),

-                    data: PrintHex(bytes),

-                };

+            assert!(

+                text_entries.get_mut().remove(&text),

+                "Got field text data that wasn't in original data set: {:?}\n{:?}\n{:?}",

+                field.headers,

+                text,

+                text_entries.get(),

+            );

 

-                let files_empty = {

-                    let mut files = expect_fmt!(self.files.get_mut(&field.name),

-                    "Got file field that wasn't in original dataset: {:?} : {:?}",

-                    field.name, curr_file);

+            if text_entries.get().is_empty() {

+                text_entries.remove_entry();

+            }

 

-                    assert!(files.remove(&curr_file), "Unexpected data for file field {:?}: {:?}",

-                        field.name, curr_file);

-

-                    files.is_empty()

-                };

-

-                if files_empty {

-                    let _ = self.files.remove(&field.name);

-                }

-            },

+            return field.data.into_inner();

         }

+

+

+        let mut file_entries = expect_fmt!(get_field(&field.headers, &mut self.files),

+                                        "Got file field that wasn't in original dataset: {:?}",

+                                        field.headers);

+

+        let field_name = field.headers.name.clone();

+        let (test_entry, inner) = FileEntry::from_field(field);

+

+        assert!(

+            file_entries.get_mut().remove(&test_entry),

+            "Got field entry that wasn't in original dataset: name: {:?}\n{:?}\nEntries: {:?}",

+            field_name,

+            test_entry,

+            file_entries.get()

+        );

+

+        if file_entries.get().is_empty() {

+            file_entries.remove_entry();

+        }

+

+        return inner;

     }

 

     fn assert_is_empty(&self) {

-        assert!(self.texts.is_empty(), "Text fields were not exhausted! Text fields: {:?}", self.texts);

-        assert!(self.files.is_empty(), "File fields were not exhausted! File fields: {:?}", self.files);

+        assert!(self.texts.is_empty(), "Text Fields were not exhausted! {:?}", self.texts);

+        assert!(self.files.is_empty(), "File Fields were not exhausted! {:?}", self.files);

     }

 }

 

@@ -109,7 +136,24 @@
 }

 

 impl FileEntry {

-    fn gen_many() -> HashSet<Self> {

+    fn from_field<M: ReadEntry>(mut field: MultipartField<M>) -> (FileEntry, M) {

+        let mut data = Vec::new();

+        expect_ok_fmt!(

+            field.data.read_to_end(&mut data),

+            "Error reading file field: {:?}\n{err}", field.headers

+        );

+

+        (

+            FileEntry {

+                content_type: field.headers.content_type.unwrap_or(mime!(Application/OctetStream)),

+                filename: field.headers.filename,

+                data: PrintHex(data),

+            },

+            field.data.into_inner()

+        )

+    }

+

+    fn gen_many() -> HashSet<FileEntry> {

         collect_rand(Self::gen)

     }

 

@@ -119,10 +163,15 @@
             false => None,

         };

 

+        let data = PrintHex(match gen_bool() {

+            true => gen_string().into_bytes(),

+            false => gen_bytes(),

+        });

+

         FileEntry {

             content_type: rand_mime(),

-            filename: filename,

-            data: PrintHex(gen_bytes())

+            filename,

+            data,

         }

     }

 

@@ -131,7 +180,7 @@
     }

 }

 

-#[derive(Hash, PartialEq, Eq)]

+#[derive(PartialEq, Eq, Hash)]

 struct PrintHex(Vec<u8>);

 

 impl fmt::Debug for PrintHex {

@@ -156,7 +205,7 @@
 

 macro_rules! do_test (

     ($client_test:ident, $server_test:ident) => (

-        let _ = ::env_logger::init();

+        ::init_log();

 

         info!("Client Test: {:?} Server Test: {:?}", stringify!($client_test),

               stringify!($server_test));

@@ -203,7 +252,7 @@
 

     use std::time::Instant;

 

-    const TIME_LIMIT_SECS: u64 = 300;

+    const TIME_LIMIT_SECS: u64 = 600;

 

     #[test]

     #[ignore]

@@ -274,28 +323,30 @@
 

     let request = ClientRequest::default();

 

-    let mut test_files = test_fields.files.iter();

+    let mut test_files = test_fields.files.iter().flat_map(

+        |(name, files)| files.iter().map(move |file| (name, file))

+    );

+

+    let test_texts = test_fields.texts.iter().flat_map(

+        |(name, texts)| texts.iter().map(move |text| (name, text))

+    );

 

     let mut multipart = Multipart::from_request(request).unwrap();

    

     // Intersperse file fields amongst text fields

-    for (name, text) in &test_fields.texts {

-        if let Some((file_name, files)) = test_files.next() {

-            for file in files {

-                multipart.write_stream(file_name, &mut &*file.data.0, file.filename(),

-                                       Some(file.content_type.clone())).unwrap();

-            }

+    for (name, text) in test_texts {

+        if let Some((file_name, file)) = test_files.next() {

+            multipart.write_stream(file_name, &mut &*file.data.0, file.filename(),

+                                   Some(file.content_type.clone())).unwrap();

         }

 

         multipart.write_text(name, text).unwrap();    

     }

 

     // Write remaining files

-    for (file_name, files) in test_files {

-        for file in files {

-            multipart.write_stream(file_name, &mut &*file.data.0, file.filename(),

-                                   Some(file.content_type.clone())).unwrap();

-        }

+    for (file_name, file) in test_files {

+        multipart.write_stream(file_name, &mut &*file.data.0, file.filename(),

+                               Some(file.content_type.clone())).unwrap();

     }

 

     multipart.send().unwrap()

@@ -306,24 +357,26 @@
 

     let mut multipart = Multipart::new();

 

-    let mut test_files = test_fields.files.iter();

+    let mut test_files = test_fields.files.iter().flat_map(

+        |(name, files)| files.iter().map(move |file| (name, file))

+    );

 

-    for (name, text) in &test_fields.texts {

-        if let Some((file_name, files)) = test_files.next() {

-            for file in files {

+    let test_texts = test_fields.texts.iter().flat_map(

+        |(name, texts)| texts.iter().map(move |text| (name, text))

+    );

+

+    for (name, text) in test_texts {

+        if let Some((file_name, file)) = test_files.next() {

                 multipart.add_stream(&**file_name, Cursor::new(&file.data.0), file.filename(),

                                      Some(file.content_type.clone()));

-            }

         }

 

         multipart.add_text(&**name, &**text);

     }

 

-    for (file_name, files) in test_files {

-        for file in files {

-            multipart.add_stream(&**file_name, Cursor::new(&file.data.0), file.filename(),

-                                 Some(file.content_type.clone()));

-        }

+    for (file_name, file) in test_files {

+        multipart.add_stream(&**file_name, Cursor::new(&file.data.0), file.filename(),

+                             Some(file.content_type.clone()));

     }

 

     let mut prepared = multipart.prepare().unwrap();

@@ -350,8 +403,8 @@
     let mut multipart = Multipart::from_request(server_buf)

         .unwrap_or_else(|_| panic!("Buffer should be multipart!"));

 

-    while let Some(mut field) = multipart.read_entry_mut().unwrap_opt() {

-        fields.check_field(&mut field);

+    while let Some(field) = multipart.read_entry_mut().unwrap_opt() {

+        fields.check_field(field);

     }

 }

 

@@ -364,15 +417,14 @@
         assert!(content_len == server_buf.data.len() as u64, "Supplied content_len different from actual");

     }

 

-    let multipart = Multipart::from_request(server_buf)

+    let mut multipart = Multipart::from_request(server_buf)

         .unwrap_or_else(|_| panic!("Buffer should be multipart!"));

 

-    let mut entry = multipart.into_entry().expect_alt("Expected entry, got none", "Error reading entry");

-    fields.check_field(&mut entry);

+    let entry = multipart.into_entry().expect_alt("Expected entry, got none", "Error reading entry");

+    multipart = fields.check_field(entry);

 

-    while let Some(entry_) = entry.next_entry().unwrap_opt() {

-        entry = entry_;

-        fields.check_field(&mut entry);

+    while let Some(entry) = multipart.into_entry().unwrap_opt() {

+        multipart = fields.check_field(entry);

     }

 }

 

diff --git a/rustc_deps/vendor/multipart/src/mock.rs b/rustc_deps/vendor/multipart/src/mock.rs
index 50e1758..c8b0422 100644
--- a/rustc_deps/vendor/multipart/src/mock.rs
+++ b/rustc_deps/vendor/multipart/src/mock.rs
@@ -5,8 +5,9 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be

 // copied, modified, or distributed except according to those terms.

 //! Mocked types for client-side and server-side APIs.

+use std::cell::{Cell, RefCell};

 use std::io::{self, Read, Write};

-use std::fmt;

+use std::{fmt, thread};

 

 use rand::{self, Rng, ThreadRng};

 

@@ -63,9 +64,9 @@
     /// Wrap the given buffer with the given boundary and optional content-length.

     pub fn with_buf(buf: Vec<u8>, boundary: String, content_len: Option<u64>) -> Self {

         HttpBuffer {

-            buf: buf,

-            boundary: boundary,

-            content_len: content_len,

+            buf,

+            boundary,

+            content_len,

             rng: rand::thread_rng()

         }

     }

@@ -140,8 +141,8 @@
     /// Assumes `content_len: None`

     pub fn new(data: &'a [u8], boundary: &'a str) -> Self {

         ServerRequest {

-            data: data,

-            boundary: boundary,

+            data,

+            boundary,

             content_len: None,

             rng: rand::thread_rng(),

         }

@@ -173,3 +174,30 @@
         self

     }

 }

+

+/// A `Write` adapter that duplicates all data written to the inner writer as well as stdout.

+pub struct StdoutTee<'s, W> {

+    inner: W,

+    stdout: io::StdoutLock<'s>,

+}

+

+impl<'s, W> StdoutTee<'s, W> {

+    /// Constructor

+    pub fn new(inner: W, stdout: &'s io::Stdout) -> Self {

+        Self {

+            inner, stdout: stdout.lock(),

+        }

+    }

+}

+

+impl<'s, W: Write> Write for StdoutTee<'s, W> {

+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {

+        self.inner.write_all(buf)?;

+        self.stdout.write(buf)

+    }

+

+    fn flush(&mut self) -> io::Result<()> {

+        self.inner.flush()?;

+        self.stdout.flush()

+    }

+}

diff --git a/rustc_deps/vendor/multipart/src/server/boundary.rs b/rustc_deps/vendor/multipart/src/server/boundary.rs
index 5615e37..564dc26 100644
--- a/rustc_deps/vendor/multipart/src/server/boundary.rs
+++ b/rustc_deps/vendor/multipart/src/server/boundary.rs
@@ -10,10 +10,9 @@
 use ::safemem;

 

 use super::buf_redux::BufReader;

+use super::buf_redux::policy::MinBuffered;

 use super::twoway;

 

-use log::LogLevel;

-

 use std::cmp;

 use std::borrow::Borrow;

 

@@ -22,6 +21,8 @@
 

 use self::State::*;

 

+pub const MIN_BUF_SIZE: usize = 1024;

+

 #[derive(Debug, PartialEq, Eq)]

 enum State {

     Searching,

@@ -32,88 +33,87 @@
 /// A struct implementing `Read` and `BufRead` that will yield bytes until it sees a given sequence.

 #[derive(Debug)]

 pub struct BoundaryReader<R> {

-    source: BufReader<R>,

+    source: BufReader<R, MinBuffered>,

     boundary: Vec<u8>,

     search_idx: usize,

     state: State,

 }

 

 impl<R> BoundaryReader<R> where R: Read {

-    #[doc(hidden)]

+    /// Internal API

     pub fn from_reader<B: Into<Vec<u8>>>(reader: R, boundary: B) -> BoundaryReader<R> {

         let mut boundary = boundary.into();

         safemem::prepend(b"--", &mut boundary);

+        let source = BufReader::new(reader).set_policy(MinBuffered(MIN_BUF_SIZE));

 

         BoundaryReader {

-            source: BufReader::new(reader),

-            boundary: boundary,

+            source,

+            boundary,

             search_idx: 0,

             state: Searching,

         }

     }

 

     fn read_to_boundary(&mut self) -> io::Result<&[u8]> {

-        // Make sure there's enough bytes in the buffer to positively identify the boundary.

-        let min_len = self.search_idx + (self.boundary.len() * 2);

-

-        let buf = fill_buf_min(&mut self.source, min_len)?;

-

-        if buf.is_empty() {

-            debug!("fill_buf_min returned zero-sized buf");

-        }

+        let buf = self.source.fill_buf()?;

 

         trace!("Buf: {:?}", String::from_utf8_lossy(buf));

 

-        debug!("Before-loop Buf len: {} Search idx: {} State: {:?}",

+        debug!("Before search Buf len: {} Search idx: {} State: {:?}",

                buf.len(), self.search_idx, self.state);

 

+        if self.state == BoundaryRead || self.state == AtEnd {

+            return Ok(&buf[..self.search_idx])

+        }

+

         if self.state == Searching && self.search_idx < buf.len() {

             let lookahead = &buf[self.search_idx..];

 

-            debug!("Find boundary loop! Lookahead len: {}", lookahead.len());

-

             // Look for the boundary, or if it isn't found, stop near the end.

-            match twoway::find_bytes(lookahead, &self.boundary) {

-                Some(found_idx) => {

+            match find_boundary(lookahead, &self.boundary) {

+                Ok(found_idx) => {

                     self.search_idx += found_idx;

                     self.state = BoundaryRead;

                 },

-                None => {

-                    self.search_idx += lookahead.len().saturating_sub(self.boundary.len() + 2);

+                Err(yield_len) => {

+                    self.search_idx += yield_len;

                 }

             }

         }        

         

-        debug!("After-loop Buf len: {} Search idx: {} State: {:?}",

+        debug!("After search Buf len: {} Search idx: {} State: {:?}",

                buf.len(), self.search_idx, self.state);

 

-        // don't modify search_idx so it always points to the start of the boundary

-        let mut buf_len = self.search_idx;

-

-        // back up the cursor to before the boundary's preceding CRLF

-        if self.state != Searching && buf_len >= 2 {

-            let two_bytes_before = &buf[buf_len - 2 .. buf_len];

+        // back up the cursor to before the boundary's preceding CRLF if we haven't already

+        if self.search_idx >= 2 && !buf[self.search_idx..].starts_with(b"\r\n") {

+            let two_bytes_before = &buf[self.search_idx - 2 .. self.search_idx];

 

             trace!("Two bytes before: {:?} ({:?}) (\"\\r\\n\": {:?})",

                    String::from_utf8_lossy(two_bytes_before), two_bytes_before, b"\r\n");

 

-            if two_bytes_before == &*b"\r\n" {

+            if two_bytes_before == *b"\r\n" {

                 debug!("Subtract two!");

-                buf_len -= 2;

+                self.search_idx -= 2;

             }

         }

 

-        let ret_buf = &buf[..buf_len];

+        let ret_buf = &buf[..self.search_idx];

 

         trace!("Returning buf: {:?}", String::from_utf8_lossy(ret_buf));

 

         Ok(ret_buf)

     }

 

-    #[doc(hidden)]

+    pub fn set_min_buf_size(&mut self, min_buf_size: usize) {

+        // ensure the minimum buf size is at least enough to find a boundary with some extra

+        let min_buf_size = cmp::max(self.boundary.len() * 2, min_buf_size);

+

+        self.source.policy_mut().0 = min_buf_size;

+    }

+

     pub fn consume_boundary(&mut self) -> io::Result<bool> {

         if self.state == AtEnd {

-            return Ok(true);

+            return Ok(false);

         }

 

         while self.state == Searching {

@@ -121,17 +121,40 @@
 

             let buf_len = self.read_to_boundary()?.len();

 

+            if buf_len == 0 && self.state == Searching {

+                return Err(io::Error::new(io::ErrorKind::UnexpectedEof,

+                                          "unexpected end of request body"));

+            }

+

             debug!("Discarding {} bytes", buf_len);

 

             self.consume(buf_len);

         }

 

         let consume_amt = {

-            let min_len = self.boundary.len() + 4;

+            let buf = self.source.fill_buf()?;

 

-            let buf = fill_buf_min(&mut self.source, min_len)?;

+            // if the boundary is found we should have at least this much in-buffer

+            let mut consume_amt = self.search_idx + self.boundary.len();

 

-            if buf.len() < min_len {

+            // we don't care about data before the cursor

+            let bnd_segment = &buf[self.search_idx..];

+

+            if bnd_segment.starts_with(b"\r\n") {

+                // preceding CRLF needs to be consumed as well

+                consume_amt += 2;

+

+                // assert that we've found the boundary after the CRLF

+                debug_assert_eq!(*self.boundary, bnd_segment[2 .. self.boundary.len() + 2]);

+            } else {

+                // assert that we've found the boundary

+                debug_assert_eq!(*self.boundary, bnd_segment[..self.boundary.len()]);

+            }

+

+            // include the trailing CRLF or --

+            consume_amt += 2;

+

+            if buf.len() < consume_amt {

                 return Err(io::Error::new(io::ErrorKind::UnexpectedEof,

                                           "not enough bytes to verify boundary"));

             }

@@ -139,15 +162,16 @@
             // we have enough bytes to verify

             self.state = Searching;

 

-            let mut consume_amt = self.search_idx + self.boundary.len();

-

-            let last_two = &buf[consume_amt .. consume_amt + 2];

+            let last_two = &buf[consume_amt - 2 .. consume_amt];

 

             match last_two {

-                b"\r\n" => consume_amt += 2,

-                b"--" => { consume_amt += 2; self.state = AtEnd },

-                _ => debug!("Unexpected bytes following boundary: {:?}",

-                            String::from_utf8_lossy(&last_two)),

+                b"\r\n" => self.state = Searching,

+                b"--" => self.state = AtEnd,

+                _ => return Err(io::Error::new(

+                    io::ErrorKind::InvalidData,

+                    format!("unexpected bytes following multipart boundary: {:X} {:X}",

+                            last_two[0], last_two[1])

+                )),

             }

 

             consume_amt

@@ -155,18 +179,41 @@
 

         trace!("Consuming {} bytes, remaining buf: {:?}",

                consume_amt,

-               String::from_utf8_lossy(self.source.get_buf()));

+               String::from_utf8_lossy(self.source.buffer()));

 

         self.source.consume(consume_amt);

+

+        if cfg!(debug_assertions) {

+

+        }

+

         self.search_idx = 0;

 

         trace!("Consumed boundary (state: {:?}), remaining buf: {:?}", self.state,

-               String::from_utf8_lossy(self.source.get_buf()));

+               String::from_utf8_lossy(self.source.buffer()));

 

-        Ok(self.state == AtEnd)

+        Ok(self.state != AtEnd)

     }

 }

 

+/// Find the boundary occurrence or the highest length to safely yield

+fn find_boundary(buf: &[u8], boundary: &[u8]) -> Result<usize, usize> {

+    if let Some(idx) = twoway::find_bytes(buf, boundary) {

+        return Ok(idx);

+    }

+

+    let search_start = buf.len().saturating_sub(boundary.len());

+

+    // search for just the boundary fragment

+    for i in search_start .. buf.len() {

+        if boundary.starts_with(&buf[i..]) {

+            return Err(i);

+        }

+    }

+

+    Err(buf.len())

+}

+

 #[cfg(feature = "bench")]

 impl<'a> BoundaryReader<io::Cursor<&'a [u8]>> {

     fn new_with_bytes(bytes: &'a [u8], boundary: &str) -> Self {

@@ -215,24 +262,12 @@
     }

 }

 

-fn fill_buf_min<R: Read>(buf: &mut BufReader<R>, min: usize) -> io::Result<&[u8]> {

-    let mut attempts = 0;

-

-    while buf.available() < min && attempts < min {

-        if buf.read_into_buf()? == 0 { break; };

-        attempts += 1;

-    }

-

-    Ok(buf.get_buf())

-}

-

 #[cfg(test)]

 mod test {

     use super::BoundaryReader;

 

     use std::io;

     use std::io::prelude::*;

-    use std::slice;

 

     const BOUNDARY: &'static str = "boundary";

     const TEST_VAL: &'static str = "--boundary\r\n\

@@ -243,7 +278,8 @@
         

     #[test]

     fn test_boundary() {

-        let _ = ::env_logger::init();        

+        ::init_log();

+

         debug!("Testing boundary (no split)");

 

         let src = &mut TEST_VAL.as_bytes();

@@ -288,7 +324,8 @@
 

     #[test]

     fn test_split_boundary() {

-        let _ = ::env_logger::init();        

+        ::init_log();

+

         debug!("Testing boundary (split)");

 

         let mut buf = String::new();

@@ -301,7 +338,6 @@
             let mut reader = BoundaryReader::from_reader(src, BOUNDARY);

             test_boundary_reader(&mut reader, &mut buf);

         }

-

     }

 

     fn test_boundary_reader<R: Read>(reader: &mut BoundaryReader<R>, buf: &mut String) {

@@ -337,7 +373,31 @@
     }

 

     #[test]

+    fn test_empty_body() {

+        ::init_log();

+

+        // empty body contains closing boundary only

+        let mut body: &[u8] = b"--boundary--";

+

+        let ref mut buf = String::new();

+        let mut reader = BoundaryReader::from_reader(&mut body, BOUNDARY);

+

+        debug!("Consume 1");

+        assert_eq!(reader.consume_boundary().unwrap(), false);

+

+        debug!("Read 1");

+        let _ = reader.read_to_string(buf).unwrap();

+        assert_eq!(buf, "");

+        buf.clear();

+

+        debug!("Consume 2");

+        assert_eq!(reader.consume_boundary().unwrap(), false);

+    }

+

+    #[test]

     fn test_leading_crlf() {

+        ::init_log();

+

         let mut body: &[u8] = b"\r\n\r\n--boundary\r\n\

                          asdf1234\

                          \r\n\r\n--boundary--";

@@ -347,7 +407,7 @@
 

 

         debug!("Consume 1");

-        reader.consume_boundary().unwrap();

+        assert_eq!(reader.consume_boundary().unwrap(), true);

 

         debug!("Read 1");

         let _ = reader.read_to_string(buf).unwrap();

@@ -355,7 +415,7 @@
         buf.clear();

 

         debug!("Consume 2");

-        reader.consume_boundary().unwrap();

+        assert_eq!(reader.consume_boundary().unwrap(), false);

 

         debug!("Read 2 (empty)");

         let _ = reader.read_to_string(buf).unwrap();

@@ -364,6 +424,8 @@
 

     #[test]

     fn test_trailing_crlf() {

+        ::init_log();

+

         let mut body: &[u8] = b"--boundary\r\n\

                          asdf1234\

                          \r\n\r\n--boundary\r\n\

@@ -373,7 +435,7 @@
         let mut reader = BoundaryReader::from_reader(&mut body, BOUNDARY);

 

         debug!("Consume 1");

-        reader.consume_boundary().unwrap();

+        assert_eq!(reader.consume_boundary().unwrap(), true);

 

         debug!("Read 1");

 

@@ -388,7 +450,7 @@
         buf.clear();

 

         debug!("Consume 2");

-        reader.consume_boundary().unwrap();

+        assert_eq!(reader.consume_boundary().unwrap(), true);

 

         debug!("Read 2");

         let _ = reader.read_to_string(buf).unwrap();

@@ -396,7 +458,7 @@
         buf.clear();

 

         debug!("Consume 3");

-        reader.consume_boundary().unwrap();

+        assert_eq!(reader.consume_boundary().unwrap(), false);

 

         debug!("Read 3 (empty)");

         let _ = reader.read_to_string(buf).unwrap();

@@ -406,6 +468,8 @@
     // https://github.com/abonander/multipart/issues/93#issuecomment-343610587

     #[test]

     fn test_trailing_lflf() {

+        ::init_log();

+

         let mut body: &[u8] = b"--boundary\r\n\

                          asdf1234\

                          \n\n\r\n--boundary\r\n\

@@ -415,7 +479,7 @@
         let mut reader = BoundaryReader::from_reader(&mut body, BOUNDARY);

 

         debug!("Consume 1");

-        reader.consume_boundary().unwrap();

+        assert_eq!(reader.consume_boundary().unwrap(), true);

 

         debug!("Read 1");

 

@@ -429,7 +493,7 @@
         buf.clear();

 

         debug!("Consume 2");

-        reader.consume_boundary().unwrap();

+        assert_eq!(reader.consume_boundary().unwrap(), true);

 

         debug!("Read 2");

         let _ = reader.read_to_string(buf).unwrap();

@@ -437,13 +501,82 @@
         buf.clear();

 

         debug!("Consume 3");

-        reader.consume_boundary().unwrap();

+        assert_eq!(reader.consume_boundary().unwrap(), false);

 

         debug!("Read 3 (empty)");

         let _ = reader.read_to_string(buf).unwrap();

         assert_eq!(buf, "");

     }

 

+    // https://github.com/abonander/multipart/issues/104

+    #[test]

+    fn test_unterminated_body() {

+        ::init_log();

+

+        let mut body: &[u8] = b"--boundary\r\n\

+                         asdf1234\

+                         \n\n\r\n--boundary\r\n\

+                         hjkl5678  ";

+

+        let ref mut buf = String::new();

+        let mut reader = BoundaryReader::from_reader(&mut body, BOUNDARY);

+

+        debug!("Consume 1");

+        assert_eq!(reader.consume_boundary().unwrap(), true);

+

+        debug!("Read 1");

+

+        // same as above

+        let buf1 = reader.read_to_boundary().unwrap().to_owned();

+        let buf2 = reader.read_to_boundary().unwrap().to_owned();

+        assert_eq!(buf1, buf2);

+

+        let _ = reader.read_to_string(buf).unwrap();

+        assert_eq!(buf, "asdf1234\n\n");

+        buf.clear();

+

+        debug!("Consume 2");

+        assert_eq!(reader.consume_boundary().unwrap(), true);

+

+        debug!("Read 2");

+        let _ = reader.read_to_string(buf).unwrap();

+        assert_eq!(buf, "hjkl5678  ");

+        buf.clear();

+

+        debug!("Consume 3 - expecting error");

+        reader.consume_boundary().unwrap_err();

+    }

+

+    #[test]

+    fn test_lone_boundary() {

+        let mut body: &[u8] = b"--boundary";

+        let mut reader = BoundaryReader::from_reader(&mut body, "boundary");

+        reader.consume_boundary().unwrap_err();

+    }

+

+    #[test]

+    fn test_invalid_boundary() {

+        let mut body: &[u8] = b"--boundary\x00\x00";

+        let mut reader = BoundaryReader::from_reader(&mut body, "boundary");

+        reader.consume_boundary().unwrap_err();

+    }

+

+    #[test]

+    fn test_skip_field() {

+        let mut body: &[u8] = b"--boundary\r\nfield1\r\n--boundary\r\nfield2\r\n--boundary--";

+        let mut reader = BoundaryReader::from_reader(&mut body, "boundary");

+

+        assert_eq!(reader.consume_boundary().unwrap(), true);

+        // skip `field1`

+        assert_eq!(reader.consume_boundary().unwrap(), true);

+

+        let mut buf = String::new();

+        reader.read_to_string(&mut buf).unwrap();

+        assert_eq!(buf, "field2");

+

+        assert_eq!(reader.consume_boundary().unwrap(), false);

+    }

+

     #[cfg(feature = "bench")]

     mod bench {

         extern crate test;

diff --git a/rustc_deps/vendor/multipart/src/server/field.rs b/rustc_deps/vendor/multipart/src/server/field.rs
index bcb2720..2bf18b6 100644
--- a/rustc_deps/vendor/multipart/src/server/field.rs
+++ b/rustc_deps/vendor/multipart/src/server/field.rs
@@ -6,27 +6,40 @@
 // copied, modified, or distributed except according to those terms.

 

 //! `multipart` field header parsing.

+use mime::{Mime, TopLevel, SubLevel};

 

-use super::httparse::{self, EMPTY_HEADER, Header, Status};

+use quick_error::ResultExt;

+

+use std::error::Error;

+use std::io::{self, Read, BufRead};

+use std::{str, fmt};

+

+// The AsciiExt import is needed for Rust older than 1.23.0. These two lines can

+// be removed when supporting older Rust is no longer needed.

+#[allow(deprecated, unused_imports)]

+use std::ascii::AsciiExt;

+

+use super::httparse::{self, EMPTY_HEADER, Header, Status, Error as HttparseError};

 

 use self::ReadEntryResult::*;

 

-use super::save::{SaveBuilder, SavedFile};

+use super::save::SaveBuilder;

 

-use mime::{TopLevel, Mime};

-

-use std::io::{self, Read, BufRead, Write};

-use std::ops::Deref;

-use std::path::{Path, PathBuf};

-use std::{str, fmt, error};

-

-use std::ascii::AsciiExt;

+use super::ArcStr;

 

 const EMPTY_STR_HEADER: StrHeader<'static> = StrHeader {

     name: "",

     val: "",

 };

 

+macro_rules! invalid_cont_disp {

+    ($reason: expr, $cause: expr) => {

+        return Err(

+            ParseHeaderError::InvalidContDisp($reason, $cause.to_string())

+        );

+    }

+}

+

 /// Not exposed

 #[derive(Copy, Clone, Debug)]

 pub struct StrHeader<'a> {

@@ -34,43 +47,50 @@
     val: &'a str,

 }

 

-const MAX_ATTEMPTS: usize = 30;

+struct DisplayHeaders<'s, 'a: 's>(&'s [StrHeader<'a>]);

+

+impl <'s, 'a: 's> fmt::Display for  DisplayHeaders<'s, 'a> {

+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

+        for hdr in self.0 {

+            writeln!(f, "{}: {}", hdr.name, hdr.val)?;

+        }

+

+        Ok(())

+    }

+}

 

 fn with_headers<R, F, Ret>(r: &mut R, closure: F) -> Result<Ret, ParseHeaderError>

 where R: BufRead, F: FnOnce(&[StrHeader]) -> Ret {

     const HEADER_LEN: usize = 4;

 

-    // These are only written once so they don't need to be `mut` or initialized.

     let consume;

     let ret;

 

-    let mut attempts = 0;

+    let mut last_len = 0;

 

     loop {

-        let mut raw_headers = [EMPTY_HEADER; HEADER_LEN];

-

+        // this should return a larger buffer each time

         let buf = r.fill_buf()?;

 

-        if attempts == MAX_ATTEMPTS {

-            return Err(ParseHeaderError::Other("Could not read field headers".to_string()));

+        // buffer has stopped growing

+        if buf.len() == last_len {

+            return Err(ParseHeaderError::TooLarge);

         }

 

-        // FIXME: https://github.com/seanmonstar/httparse/issues/34

-        match httparse::parse_headers(buf, &mut raw_headers) {

-            Ok(Status::Complete((consume_, raw_headers))) =>  {

-                consume = consume_;

+        let mut raw_headers = [EMPTY_HEADER; HEADER_LEN];

+

+        match httparse::parse_headers(buf, &mut raw_headers)? {

+            // read more and try again

+            Status::Partial => last_len = buf.len(),

+            Status::Complete((consume_, raw_headers)) => {

                 let mut headers = [EMPTY_STR_HEADER; HEADER_LEN];

                 let headers = copy_headers(raw_headers, &mut headers)?;

                 debug!("Parsed headers: {:?}", headers);

+                consume = consume_;

                 ret = closure(headers);

                 break;

             },

-            Ok(Status::Partial) => {

-                attempts += 1;

-                continue;

-            },

-            Err(err) => return Err(ParseHeaderError::from(err)),

-        };

+        }

     }

 

     r.consume(consume);

@@ -87,11 +107,26 @@
 }

 

 /// The headers that (may) appear before a `multipart/form-data` field.

+///

+/// ### Warning: Values are Client-Provided

+/// Everything in this struct are values from the client and should be considered **untrustworthy**.

+/// This crate makes no effort to validate or sanitize any client inputs.

+#[derive(Clone, Debug)]

 pub struct FieldHeaders {

-    /// The `Content-Disposition` header, required.

-    cont_disp: ContentDisp,

-    /// The `Content-Type` header, optional.

-    cont_type: Option<Mime>,

+    /// The field's name from the form.

+    pub name: ArcStr,

+

+    /// The filename of this entry, if supplied. This is not guaranteed to match the original file

+    /// or even to be a valid filename for the current platform.

+    pub filename: Option<String>,

+

+    /// The MIME type (`Content-Type` value) of this file, if supplied by the client.

+    ///

+    /// If this is not supplied, the content-type of the field should default to `text/plain` as

+    /// per [IETF RFC 7578, section 4.4](https://tools.ietf.org/html/rfc7578#section-4.4), but this

+    /// should not be implicitly trusted. This crate makes no attempt to identify or validate

+    /// the content-type of the actual field data.

+    pub content_type: Option<Mime>,

 }

 

 impl FieldHeaders {

@@ -101,16 +136,18 @@
     }

 

     fn parse(headers: &[StrHeader]) -> Result<FieldHeaders, ParseHeaderError> {

-        let cont_disp = ContentDisp::parse(headers)?.ok_or(ParseHeaderError::MissingContentDisposition)?;

+        let cont_disp = ContentDisp::parse_required(headers)?;

+

         Ok(FieldHeaders {

-            cont_disp: cont_disp,

-            cont_type: parse_cont_type(headers)?,

+            name: cont_disp.field_name.into(),

+            filename: cont_disp.filename,

+            content_type: parse_content_type(headers)?,

         })

     }

 }

 

 /// The `Content-Disposition` header.

-pub struct ContentDisp {

+struct ContentDisp {

     /// The name of the `multipart/form-data` field.

     field_name: String,

     /// The optional filename for this field.

@@ -118,70 +155,85 @@
 }

 

 impl ContentDisp {

-    fn parse(headers: &[StrHeader]) -> Result<Option<ContentDisp>, ParseHeaderError> {

-        const CONT_DISP: &'static str = "Content-Disposition";

-        let header = if let Some(header) = find_header(headers, CONT_DISP) {

+    fn parse_required(headers: &[StrHeader]) -> Result<ContentDisp, ParseHeaderError> {

+        let header = if let Some(header) = find_header(headers, "Content-Disposition") {

             header

         } else {

-            return Ok(None);

+            return Err(ParseHeaderError::MissingContentDisposition(

+                DisplayHeaders(headers).to_string()

+            ));

         };

 

-        const NAME: &'static str = "name=";

-        const FILENAME: &'static str = "filename=";

-

+        // Content-Disposition: ?

         let after_disp_type = match split_once(header.val, ';') {

             Some((disp_type, after_disp_type)) => {

+                // assert Content-Disposition: form-data

+                // but needs to be parsed out to trim the spaces (allowed by spec IIRC)

                 if disp_type.trim() != "form-data" {

-                    let err = format!("Unexpected Content-Disposition value: {:?}", disp_type);

-                    return Err(ParseHeaderError::Invalid(err));

+                    invalid_cont_disp!("unexpected Content-Disposition value", disp_type);

                 }

                 after_disp_type

             },

-            None => {

-                let err = format!("Expected additional data after Content-Disposition type, got {:?}", header.val);

-                return Err(ParseHeaderError::Invalid(err));

-            }

+            None => invalid_cont_disp!("expected additional data after Content-Disposition type",

+                                       header.val),

         };

 

-        let (field_name, filename) = match get_str_after(NAME, ';', after_disp_type) {

-            None => {

-                let err = format!("Expected field name and maybe filename, got {:?}", after_disp_type);

-                return Err(ParseHeaderError::Invalid(err));

-            },

+        // Content-Disposition: form-data; name=?

+        let (field_name, filename) = match get_str_after("name=", ';', after_disp_type) {

+            None => invalid_cont_disp!("expected field name and maybe filename, got",

+                                       after_disp_type),

+            // Content-Disposition: form-data; name={field_name}; filename=?

             Some((field_name, after_field_name)) => {

                 let field_name = trim_quotes(field_name);

-                let filename = get_str_after(FILENAME, ';', after_field_name).map(|(filename, _)| trim_quotes(filename).to_owned());

+                let filename = get_str_after("filename=", ';', after_field_name)

+                    .map(|(filename, _)| trim_quotes(filename).to_owned());

                 (field_name, filename)

             },

         };

 

-        Ok(Some(ContentDisp { field_name: field_name.to_owned(), filename: filename }))

+        Ok(ContentDisp { field_name: field_name.to_owned(), filename })

     }

 }

 

-fn parse_cont_type(headers: &[StrHeader]) -> Result<Option<Mime>, ParseHeaderError> {

-    const CONTENT_TYPE: &'static str = "Content-Type";

-    let header = if let Some(header) = find_header(headers, CONTENT_TYPE) {

-        header

+fn parse_content_type(headers: &[StrHeader]) -> Result<Option<Mime>, ParseHeaderError> {

+    if let Some(header) = find_header(headers, "Content-Type") {

+        // Boundary parameter will be parsed into the `Mime`

+        debug!("Found Content-Type: {:?}", header.val);

+        Ok(Some(header.val.parse::<Mime>()

+            .map_err(|_| ParseHeaderError::MimeError(header.val.into()))?))

     } else {

-        return Ok(None)

-    };

-

-    // Boundary parameter will be parsed into the `Mime`

-    debug!("Found Content-Type: {:?}", header.val);

-    Ok(Some(read_content_type(header.val.trim())))

+        Ok(None)

+    }

 }

 

-/// A field in a multipart request. May be either text or a binary stream (file).

+/// A field in a multipart request with its associated headers and data.

 #[derive(Debug)]

 pub struct MultipartField<M: ReadEntry> {

-    /// The field's name from the form

-    pub name: String,

-    /// The data of the field. Can be text or binary.

+    /// The headers for this field, including the name, filename, and content-type, if provided.

+    ///

+    /// ### Warning: Values are Client-Provided

+    /// Everything in this struct are values from the client and should be considered **untrustworthy**.

+    /// This crate makes no effort to validate or sanitize any client inputs.

+    pub headers: FieldHeaders,

+

+    /// The field's data.

     pub data: MultipartData<M>,

 }

 

 impl<M: ReadEntry> MultipartField<M> {

+    /// Returns `true` if this field has no content-type or the content-type is `text/...`.

+    ///

+    /// This typically means it can be read to a string, but it could still be using an unsupported

+    /// character encoding, so decoding to `String` needs to ensure that the data is valid UTF-8.

+    ///

+    /// Note also that the field contents may be too large to reasonably fit in memory.

+    /// The `.save()` adapter can be used to enforce a size limit.

+    ///

+    /// Detecting character encodings by any means is (currently) beyond the scope of this crate.

+    pub fn is_text(&self) -> bool {

+        self.headers.content_type.as_ref().map_or(true, |ct| ct.0 == TopLevel::Text)

+    }

+

     /// Read the next entry in the request.

     pub fn next_entry(self) -> ReadEntryResult<M> {

         self.data.into_inner().read_entry()

@@ -212,291 +264,72 @@
 }

 

 /// The data of a field in a `multipart/form-data` request.

+///

+/// You can read it to EOF, or use the `save()` adaptor to save it to disk/memory.

 #[derive(Debug)]

-pub enum MultipartData<M> {

-    /// The field's payload is a text string.

-    Text(MultipartText<M>),

-    /// The field's payload is a binary stream (file).

-    File(MultipartFile<M>),

-}

-

-impl<M: ReadEntry> MultipartData<M> {

-    /// Borrow this payload as a text field, if possible.

-    pub fn as_text(&self) -> Option<&str> {

-        match *self {

-            MultipartData::Text(ref text) => Some(&text.text),

-            _ => None,

-        }

-    }

-

-    /// Borrow this payload as a file field, if possible.

-    /// Mutably borrows so the contents can be read.

-    pub fn as_file(&mut self) -> Option<&mut MultipartFile<M>> {

-        match *self {

-            MultipartData::File(ref mut file) => Some(file),

-            _ => None,

-        }

-    }

-

-    /// Return the inner `Multipart`.

-    pub fn into_inner(self) -> M {

-        use self::MultipartData::*;

-

-        match self {

-            Text(text) => text.into_inner(),

-            File(file) => file.into_inner(),

-        }

-    }

-

-    fn take_inner(&mut self) -> M {

-        use self::MultipartData::*;

-

-        match *self {

-            Text(ref mut text) => text.take_inner(),

-            File(ref mut file) => file.take_inner(),

-        }

-    }

-

-    fn give_inner(&mut self, inner: M) {

-        use self::MultipartData::*;

-

-        let inner = Some(inner);

-

-        match *self {

-            Text(ref mut text) => text.inner = inner,

-            File(ref mut file) => file.inner = inner,

-        }

-    }

-}

-

-/// A representation of a text field in a `multipart/form-data` body.

-#[derive(Debug)]

-pub struct MultipartText<M> {

-    /// The text of this field.

-    pub text: String,

-    /// The `Multipart` this field was read from.

+pub struct MultipartData<M> {

     inner: Option<M>,

 }

 

-impl<M> Deref for MultipartText<M> {

-    type Target = str;

+const DATA_INNER_ERR: &str = "MultipartFile::inner taken and not replaced; this is likely \

+                              caused by a logic error in `multipart` or by resuming after \

+                              a previously caught panic.\nPlease open an issue with the \

+                              relevant backtrace and debug logs at \

+                              https://github.com/abonander/multipart";

 

-    fn deref(&self) -> &Self::Target {

-        &self.text

-    }

-}

-

-impl<M> Into<String> for MultipartText<M> {

-    fn into(self) -> String {

-        self.text

-    }

-}

-

-impl<M> MultipartText<M> {

-    #[doc(hidden)]

-    pub fn take_inner(&mut self) -> M {

-        self.inner.take().expect("MultipartText::inner already taken!")

-    }

-

-    fn into_inner(self) -> M {

-        self.inner.expect("MultipartText::inner taken!")

-    }

-}

-

-/// A representation of a file in HTTP `multipart/form-data`.

-///

-/// Note that the file is not yet saved to the local filesystem;

-/// instead, this struct exposes `Read` and `BufRead` impls which point

-/// to the beginning of the file's contents in the HTTP stream.

-///

-/// You can read it to EOF, or use one of the `save()` method

-/// to save it to disk.

-#[derive(Debug)]

-pub struct MultipartFile<M> {

-    /// The filename of this entry, if supplied.

-    ///

-    /// ### Warning: Client Provided / Untrustworthy

-    /// You should treat this value as **untrustworthy** because it is an arbitrary string

-    /// provided by the client.

-    ///

-    /// It is a serious security risk to create files or directories with paths based on user input.

-    /// A malicious user could craft a path which can be used to overwrite important files, such as

-    /// web templates, static assets, Javascript files, database files, configuration files, etc.,

-    /// if they are writable by the server process.

-    ///

-    /// This can be mitigated somewhat by setting filesystem permissions as

-    /// conservatively as possible and running the server under its own user with restricted

-    /// permissions, but you should still not use user input directly as filesystem paths.

-    /// If it is truly necessary, you should sanitize filenames such that they cannot be

-    /// misinterpreted by the OS. Such functionality is outside the scope of this crate.

-    pub filename: Option<String>,

-

-    /// The MIME type (`Content-Type` value) of this file, if supplied by the client,

-    /// or `"applicaton/octet-stream"` otherwise.

-    ///

-    /// ### Note: Client Provided

-    /// Consider this value to be potentially untrustworthy, as it is provided by the client.

-    /// It may be inaccurate or entirely wrong, depending on how the client determined it.

-    ///

-    /// Some variants wrap arbitrary strings which could be abused by a malicious user if your

-    /// application performs any non-idempotent operations based on their value, such as

-    /// starting another program or querying/updating a database (web-search "SQL injection").

-    pub content_type: Mime,

-

-    /// The `Multipart` this field was read from.

-    inner: Option<M>,

-}

-

-impl<M> MultipartFile<M> {

-    /// Get the filename of this entry, if supplied.

-    ///

-    /// ### Warning: Client Provided / Untrustworthy

-    /// You should treat this value as **untrustworthy** because it is an arbitrary string

-    /// provided by the client.

-    ///

-    /// It is a serious security risk to create files or directories with paths based on user input.

-    /// A malicious user could craft a path which can be used to overwrite important files, such as

-    /// web templates, static assets, Javascript files, database files, configuration files, etc.,

-    /// if they are writable by the server process.

-    ///

-    /// This can be mitigated somewhat by setting filesystem permissions as

-    /// conservatively as possible and running the server under its own user with restricted

-    /// permissions, but you should still not use user input directly as filesystem paths.

-    /// If it is truly necessary, you should sanitize filenames such that they cannot be

-    /// misinterpreted by the OS. Such functionality is outside the scope of this crate.

-    #[deprecated(since = "0.10.0", note = "`filename` field is now public")]

-    pub fn filename(&self) -> Option<&str> {

-        self.filename.as_ref().map(String::as_ref)

-    }

-

-    /// Get the MIME type (`Content-Type` value) of this file, if supplied by the client,

-    /// or `"applicaton/octet-stream"` otherwise.

-    ///

-    /// ### Note: Client Provided

-    /// Consider this value to be potentially untrustworthy, as it is provided by the client.

-    /// It may be inaccurate or entirely wrong, depending on how the client determined it.

-    ///

-    /// Some variants wrap arbitrary strings which could be abused by a malicious user if your

-    /// application performs any non-idempotent operations based on their value, such as

-    /// starting another program or querying/updating a database (web-search "SQL injection").

-    #[deprecated(since = "0.10.0", note = "`content_type` field is now public")]

-    pub fn content_type(&self) -> &Mime {

-        &self.content_type

-    }

-

-

-    fn inner_mut(&mut self) -> &mut M {

-        self.inner.as_mut().expect("MultipartFile::inner taken!")

-    }

-

-    #[doc(hidden)]

-    pub fn take_inner(&mut self) -> M {

-        self.inner.take().expect("MultipartFile::inner already taken!")

-    }

-

-    fn into_inner(self) -> M {

-        self.inner.expect("MultipartFile::inner taken!")

-    }

-}

-

-impl<M> MultipartFile<M> where M: ReadEntry {

-    /// Get a builder type which can save the file with or without a size limit.

+impl<M> MultipartData<M> where M: ReadEntry {

+    /// Get a builder type which can save the field with or without a size limit.

     pub fn save(&mut self) -> SaveBuilder<&mut Self> {

         SaveBuilder::new(self)

     }

 

-    /// Save this file to the given output stream.

-    ///

-    /// If successful, returns the number of bytes written.

-    ///

-    /// Retries when `io::Error::kind() == io::ErrorKind::Interrupted`.

-    #[deprecated(since = "0.10.0", note = "use `.save().write_to()` instead")]

-    pub fn save_to<W: Write>(&mut self, out: W) -> io::Result<u64> {

-        self.save().write_to(out).into_result_strict()

+    /// Take the inner `Multipart` or `&mut Multipart`

+    pub fn into_inner(self) -> M {

+        self.inner.expect(DATA_INNER_ERR)

     }

 

-    /// Save this file to the given output stream, **truncated** to `limit`

-    /// (no more than `limit` bytes will be written out).

+    /// Set the minimum buffer size that `BufRead::fill_buf(self)` will return

+    /// until the end of the stream is reached. Set this as small as you can tolerate

+    /// to minimize `read()` calls (`read()` won't be called again until the buffer

+    /// is smaller than this).

     ///

-    /// If successful, returns the number of bytes written.

-    ///

-    /// Retries when `io::Error::kind() == io::ErrorKind::Interrupted`.

-    #[deprecated(since = "0.10.0", note = "use `.save().size_limit(limit).write_to(out)` instead")]

-    pub fn save_to_limited<W: Write>(&mut self, out: W, limit: u64) -> io::Result<u64> {

-        self.save().size_limit(limit).write_to(out).into_result_strict()

+    /// This value is reset between fields.

+    pub fn set_min_buf_size(&mut self, min_buf_size: usize) {

+        self.inner_mut().set_min_buf_size(min_buf_size)

     }

 

-    /// Save this file to `path`.

-    ///

-    /// Returns the saved file info on success, or any errors otherwise.

-    ///

-    /// Retries when `io::Error::kind() == io::ErrorKind::Interrupted`.

-    #[deprecated(since = "0.10.0", note = "use `.save().with_path(path)` instead")]

-    pub fn save_as<P: Into<PathBuf>>(&mut self, path: P) -> io::Result<SavedFile> {

-        self.save().with_path(path).into_result_strict()

+    fn inner_mut(&mut self) -> &mut M {

+        self.inner.as_mut().expect(DATA_INNER_ERR)

     }

 

-    /// Save this file in the directory pointed at by `dir`,

-    /// using a random alphanumeric string as the filename.

-    ///

-    /// Any missing directories in the `dir` path will be created.

-    ///

-    /// Returns the saved file's info on success, or any errors otherwise.

-    ///

-    /// Retries when `io::Error::kind() == io::ErrorKind::Interrupted`.

-    #[deprecated(since = "0.10.0", note = "use `.save().with_dir(dir)` instead")]

-    pub fn save_in<P: AsRef<Path>>(&mut self, dir: P) -> io::Result<SavedFile> {

-        self.save().with_dir(dir.as_ref()).into_result_strict()

+    fn take_inner(&mut self) -> M {

+        self.inner.take().expect(DATA_INNER_ERR)

     }

 

-    /// Save this file to `path`, **truncated** to `limit` (no more than `limit` bytes will be written out).

-    ///

-    /// Any missing directories in the `dir` path will be created.

-    ///

-    /// Returns the saved file's info on success, or any errors otherwise.

-    ///

-    /// Retries when `io::Error::kind() == io::ErrorKind::Interrupted`.

-    #[deprecated(since = "0.10.0", note = "use `.save().size_limit(limit).with_path(path)` instead")]

-    pub fn save_as_limited<P: Into<PathBuf>>(&mut self, path: P, limit: u64) -> io::Result<SavedFile> {

-        self.save().size_limit(limit).with_path(path).into_result_strict()

-    }

-

-    /// Save this file in the directory pointed at by `dir`,

-    /// using a random alphanumeric string as the filename.

-    ///

-    /// **Truncates** file to `limit` (no more than `limit` bytes will be written out).

-    ///

-    /// Any missing directories in the `dir` path will be created.

-    ///

-    /// Returns the saved file's info on success, or any errors otherwise.

-    ///

-    /// Retries when `io::Error::kind() == io::ErrorKind::Interrupted`.

-    #[deprecated(since = "0.10.0", note = "use `.save().size_limit(limit).with_dir(dir)` instead")]

-    pub fn save_in_limited<P: AsRef<Path>>(&mut self, dir: P, limit: u64) -> io::Result<SavedFile> {

-        self.save().size_limit(limit).with_dir(dir).into_result_strict()

+    fn give_inner(&mut self, inner: M) {

+        self.inner = Some(inner);

     }

 }

 

-impl<M: ReadEntry> Read for MultipartFile<M> {

+impl<M: ReadEntry> Read for MultipartData<M> {

     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize>{

-        self.inner_mut().source().read(buf)

+        self.inner_mut().source_mut().read(buf)

     }

 }

 

-impl<M: ReadEntry> BufRead for MultipartFile<M> {

+/// In this implementation, `fill_buf()` can return more data with each call.

+///

+/// Use `set_min_buf_size()` if you require a minimum buffer length.

+impl<M: ReadEntry> BufRead for MultipartData<M> {

     fn fill_buf(&mut self) -> io::Result<&[u8]> {

-        self.inner_mut().source().fill_buf()

+        self.inner_mut().source_mut().fill_buf()

     }

 

     fn consume(&mut self, amt: usize) {

-        self.inner_mut().source().consume(amt)

+        self.inner_mut().source_mut().consume(amt)

     }

 }

 

-fn read_content_type(cont_type: &str) -> Mime {

-    cont_type.parse().ok().unwrap_or_else(::mime_guess::octet_stream)

-}

-

 fn split_once(s: &str, delim: char) -> Option<(&str, &str)> {

     s.find(delim).map(|idx| s.split_at(idx))

 }

@@ -518,8 +351,8 @@
 }

 

 fn find_header<'a, 'b>(headers: &'a [StrHeader<'b>], name: &str) -> Option<&'a StrHeader<'b>> {

-    /// Field names are case insensitive and consist of ASCII characters

-    /// only (see https://tools.ietf.org/html/rfc822#section-3.2).

+    // Field names are case insensitive and consist of ASCII characters

+    // only (see https://tools.ietf.org/html/rfc822#section-3.2).

     headers.iter().find(|header| header.name.eq_ignore_ascii_case(name))

 }

 

@@ -527,50 +360,34 @@
 pub trait ReadEntry: PrivReadEntry + Sized {

     /// Attempt to read the next entry in the multipart stream.

     fn read_entry(mut self) -> ReadEntryResult<Self> {

+        self.set_min_buf_size(super::boundary::MIN_BUF_SIZE);

+

         debug!("ReadEntry::read_entry()");

 

-        if try_read_entry!(self; self.consume_boundary()) {

+        if !try_read_entry!(self; self.consume_boundary()) {

             return End(self);

         }

 

-        let field_headers = try_read_entry!(self; self.read_headers());

+        let field_headers: FieldHeaders = try_read_entry!(self; self.read_headers());

 

-        let data = match field_headers.cont_type {

-            Some(cont_type) => {

-                match cont_type.0 {

-                    TopLevel::Multipart => {

-                        let msg = format!("Error on field {:?}: nested multipart fields are \

-                                           not supported. However, reports of clients sending \

-                                           requests like this are welcome at \

-                                           https://github.com/abonander/multipart/issues/56",

-                                          field_headers.cont_disp.field_name);

-

-                        return ReadEntryResult::invalid_data(self, msg);

-                    },

-                    _ => {

-                        MultipartData::File(

-                            MultipartFile {

-                                filename: field_headers.cont_disp.filename,

-                                content_type: cont_type,

-                                inner: Some(self)

-                            }

-                        )

-                    }

-                }

-            },

-            None => {

-                let text = try_read_entry!(self; self.read_to_string());

-                MultipartData::Text(MultipartText {

-                    text: text,

-                    inner: Some(self),

-                })

-            },

-        };

+        if let Some(ct) = field_headers.content_type.as_ref() {

+            if ct.0 == TopLevel::Multipart {

+                // fields of this type are sent by (supposedly) no known clients

+                // (https://tools.ietf.org/html/rfc7578#appendix-A) so I'd be fascinated

+                // to hear about any in the wild

+                info!("Found nested multipart field: {:?}:\r\n\

+                       Please report this client's User-Agent and any other available details \

+                       at https://github.com/abonander/multipart/issues/56",

+                       field_headers);

+            }

+        }

 

         Entry(

             MultipartField {

-                name: field_headers.cont_disp.field_name,

-                data: data,

+                headers: field_headers,

+                data: MultipartData {

+                    inner: Some(self),

+                },

             }

         )

     }

@@ -587,21 +404,23 @@
 pub trait PrivReadEntry {

     type Source: BufRead;

 

-    fn source(&mut self) -> &mut Self::Source;

+    fn source_mut(&mut self) -> &mut Self::Source;

+

+    fn set_min_buf_size(&mut self, min_buf_size: usize);

 

     /// Consume the next boundary.

-    /// Returns `true` if the last boundary was read, `false` otherwise.

+    /// Returns `true` if a field should follow, `false` otherwise.

     fn consume_boundary(&mut self) -> io::Result<bool>;

 

     fn read_headers(&mut self) -> Result<FieldHeaders, io::Error> {

-        FieldHeaders::read_from(&mut self.source())

+        FieldHeaders::read_from(self.source_mut())

             .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))

     }

 

     fn read_to_string(&mut self) -> io::Result<String> {

         let mut buf = String::new();

 

-        match self.source().read_to_string(&mut buf) {

+        match self.source_mut().read_to_string(&mut buf) {

             Ok(_) => Ok(buf),

             Err(err) => Err(err),

         }

@@ -611,8 +430,12 @@
 impl<'a, M: ReadEntry> PrivReadEntry for &'a mut M {

     type Source = M::Source;

 

-    fn source(&mut self) -> &mut M::Source {

-        (**self).source()

+    fn source_mut(&mut self) -> &mut M::Source {

+        (**self).source_mut()

+    }

+

+    fn set_min_buf_size(&mut self, min_buf_size: usize) {

+        (**self).set_min_buf_size(min_buf_size)

     }

 

     fn consume_boundary(&mut self) -> io::Result<bool> {

@@ -682,65 +505,43 @@
             Error(_, err) => panic!("{}: {:?}", msg, err),

         }

     }

-

-    fn invalid_data(multipart: M, msg: String) -> Self {

-        ReadEntryResult::Error (

-            multipart,

-            io::Error::new(io::ErrorKind::InvalidData, msg),

-        )

-    }

 }

 

+const GENERIC_PARSE_ERR: &str = "an error occurred while parsing field headers";

 

-#[derive(Debug)]

-enum ParseHeaderError {

-    /// The `Content-Disposition` header was not found

-    MissingContentDisposition,

-    /// The header was found but could not be parsed

-    Invalid(String),

-    /// IO error

-    Io(io::Error),

-    Other(String),

-}

-

-impl fmt::Display for ParseHeaderError {

-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

-        match *self {

-            ParseHeaderError::MissingContentDisposition => write!(f, "\"Content-Disposition\" header not found (ParseHeaderError::MissingContentDisposition)"),

-            ParseHeaderError::Invalid(ref msg) => write!(f, "invalid header (ParseHeaderError::Invalid({}))", msg),

-            ParseHeaderError::Io(_) => write!(f, "could not read header (ParseHeaderError::Io)"),

-            ParseHeaderError::Other(ref reason) => write!(f, "unknown parsing error (ParseHeaderError::Other(\"{}\"))", reason),

+quick_error! {

+    #[derive(Debug)]

+    enum ParseHeaderError {

+        /// The `Content-Disposition` header was not found

+        MissingContentDisposition(headers: String) {

+            display(x) -> ("{}:\n{}", x.description(), headers)

+            description("\"Content-Disposition\" header not found in field headers")

         }

-    }

-}

-

-impl error::Error for ParseHeaderError {

-    fn description(&self) -> &str {

-        match *self {

-            ParseHeaderError::MissingContentDisposition => "\"Content-Disposition\" header not found",

-            ParseHeaderError::Invalid(_) => "the header is not formatted correctly",

-            ParseHeaderError::Io(_) => "failed to read the header",

-            ParseHeaderError::Other(_) => "unknown parsing error",

+        InvalidContDisp(reason: &'static str, cause: String) {

+            display(x) -> ("{}: {}: {}", x.description(), reason, cause)

+            description("invalid \"Content-Disposition\" header")

         }

-    }

-

-    fn cause(&self) -> Option<&error::Error> {

-        match *self {

-            ParseHeaderError::Io(ref e) => Some(e),

-            _ => None,

+        /// The header was found but could not be parsed

+        TokenizeError(err: HttparseError) {

+            description(GENERIC_PARSE_ERR)

+            display(x) -> ("{}: {}", x.description(), err)

+            cause(err)

+            from()

         }

-    }

-}

-

-impl From<io::Error> for ParseHeaderError {

-    fn from(err: io::Error) -> ParseHeaderError {

-        ParseHeaderError::Io(err)

-    }

-}

-

-impl From<httparse::Error> for ParseHeaderError {

-    fn from(err: httparse::Error) -> ParseHeaderError {

-        ParseHeaderError::Invalid(format!("{}", err))

+        MimeError(cont_type: String) {

+            description("Failed to parse Content-Type")

+            display(this) -> ("{}: {}", this.description(), cont_type)

+        }

+        TooLarge {

+            description("field headers section ridiculously long or missing trailing CRLF-CRLF")

+        }

+        /// IO error

+        Io(err: io::Error) {

+            description("an io error occurred while parsing the headers")

+            display(x) -> ("{}: {}", x.description(), err)

+            cause(err)

+            from()

+        }

     }

 }

 

diff --git a/rustc_deps/vendor/multipart/src/server/hyper.rs b/rustc_deps/vendor/multipart/src/server/hyper.rs
index 24b2992..1d1b596 100644
--- a/rustc_deps/vendor/multipart/src/server/hyper.rs
+++ b/rustc_deps/vendor/multipart/src/server/hyper.rs
@@ -16,7 +16,7 @@
 

 pub use hyper::server::Request as HyperRequest;

 

-use mime::{Mime, TopLevel, SubLevel, Attr, Value};

+use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};

 

 use super::{Multipart, HttpRequest};

 

@@ -36,10 +36,7 @@
     /// Create a new `Switch` instance where

     /// `normal` handles normal Hyper requests and `multipart` handles Multipart requests

     pub fn new(normal: H, multipart: M) -> Switch<H, M> {

-        Switch {

-            normal: normal,

-            multipart: multipart,

-        }

+        Switch { normal, multipart }

     }

 }

 

diff --git a/rustc_deps/vendor/multipart/src/server/iron.rs b/rustc_deps/vendor/multipart/src/server/iron.rs
index a2fb25b..9e320a2 100644
--- a/rustc_deps/vendor/multipart/src/server/iron.rs
+++ b/rustc_deps/vendor/multipart/src/server/iron.rs
@@ -12,7 +12,7 @@
 use std::path::PathBuf;

 use std::{error, fmt, io};

 

-use super::{HttpRequest, Multipart};

+use super::{FieldHeaders, HttpRequest, Multipart};

 use super::save::{Entries, PartialReason, TempDir};

 use super::save::SaveResult::*;

 

@@ -57,7 +57,7 @@
 /// fn main() {

 ///     let mut chain = Chain::new(|req: &mut Request| if let Some(entries) =

 ///         req.extensions.get::<Entries>() {

-///         

+///

 ///         Ok(Response::with(format!("{:?}", entries)))

 ///     } else {

 ///         Ok(Response::with("Not a multipart request"))

@@ -74,14 +74,14 @@
 pub struct Intercept {

     /// The parent directory for all temporary directories created by this middleware.

     /// Will be created if it doesn't exist (lazy).

-    /// 

+    ///

     /// If omitted, uses the OS temporary directory.

     ///

     /// Default value: `None`.

     pub temp_dir_path: Option<PathBuf>,

     /// The size limit of uploaded files, in bytes.

     ///

-    /// Files which exceed this size will be rejected. 

+    /// Files which exceed this size will be rejected.

     /// See the `limit_behavior` field for more info.

     ///

     /// Default value: [`DEFAULT_FILE_SIZE_LIMIT`](constant.default_file_size_limit.html)

@@ -97,7 +97,7 @@
     pub limit_behavior: LimitBehavior,

 }

 

-impl Intercept { 

+impl Intercept {

     /// Set the `temp_dir_path` for this middleware.

     pub fn temp_dir_path<P: Into<PathBuf>>(self, path: P) -> Self {

         Intercept { temp_dir_path: Some(path.into()), .. self }

@@ -142,14 +142,14 @@
                               .count_limit(self.file_count_limit)

                               .with_temp_dir(tempdir) {

             Full(entries) => Ok(Some(entries)),

+            Partial(_, PartialReason::Utf8Error(_)) => unreachable!(),

             Partial(_, PartialReason::IoError(err)) => Err(io_to_iron(err, "Error midway through request")),

             Partial(_, PartialReason::CountLimit) => Err(FileCountLimitError(self.file_count_limit).into()),

             Partial(partial, PartialReason::SizeLimit) =>  {

-                let partial_file = partial.partial_file.expect(EXPECT_PARTIAL_FILE);

+                let partial = partial.partial.expect(EXPECT_PARTIAL_FILE);

                 Err(

                     FileSizeLimitError {

-                        field: partial_file.field_name,

-                        filename: partial_file.source.filename,

+                        field: partial.source.headers,

                     }.into()

                 )

             },

@@ -182,8 +182,8 @@
 

 type IronMultipart<'r, 'a, 'b> = Multipart<&'r mut IronBody<'a, 'b>>;

 

-const EXPECT_PARTIAL_FILE: &'static str = "File size limit hit but the offending \

-                                           file was not available; this is a bug.";

+const EXPECT_PARTIAL_FILE: &str = "File size limit hit but the offending \

+                                   file was not available; this is a bug.";

 

 impl Default for Intercept {

     fn default() -> Self {

@@ -229,9 +229,7 @@
 #[derive(Debug)]

 pub struct FileSizeLimitError {

     /// The field where the error occurred.

-    pub field: String,

-    /// The filename of the oversize file, if it was provided.

-    pub filename: Option<String>,

+    pub field: FieldHeaders,

 }

 

 impl error::Error for FileSizeLimitError {

@@ -242,9 +240,9 @@
 

 impl fmt::Display for FileSizeLimitError {

     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

-        match self.filename {

-            Some(ref filename) => write!(f, "File size limit reached for field \"{}\" (filename: \"{}\")", self.field, filename),

-            None => write!(f, "File size limit reached for field \"{}\" (no filename)", self.field),

+        match self.field.filename {

+            Some(ref filename) => write!(f, "File size limit reached for field \"{}\" (filename: \"{}\")", self.field.name, filename),

+            None => write!(f, "File size limit reached for field \"{}\" (no filename)", self.field.name),

         }

     }

 }

diff --git a/rustc_deps/vendor/multipart/src/server/mod.rs b/rustc_deps/vendor/multipart/src/server/mod.rs
index d882c04..50716fe 100644
--- a/rustc_deps/vendor/multipart/src/server/mod.rs
+++ b/rustc_deps/vendor/multipart/src/server/mod.rs
@@ -11,28 +11,38 @@
 //!

 //! See the `Multipart` struct for more info.

 

-extern crate buf_redux;

+pub extern crate buf_redux;

 extern crate httparse;

 extern crate twoway;

 

 use std::borrow::Borrow;

 use std::io::prelude::*;

-use std::path::Path;

+use std::sync::Arc;

 use std::io;

 

-use tempdir::TempDir;

-

 use self::boundary::BoundaryReader;

 

 use self::field::PrivReadEntry;

 

-pub use self::field::{MultipartField, MultipartFile, MultipartData, ReadEntry, ReadEntryResult};

+pub use self::field::{FieldHeaders, MultipartField, MultipartData, ReadEntry, ReadEntryResult};

 

 use self::save::SaveBuilder;

 

-pub use self::save::{Entries, SaveResult, SavedFile};

+pub use self::save::{Entries, SaveResult, SavedField};

 

-use self::save::EntriesSaveResult;

+/// Default typedef for shared strings.

+///

+/// Enable the `use_arc_str` feature to use `Arc<str>` instead, which saves an indirection but

+/// cannot be constructed in Rust versions older than 1.21 (the `From<String>` impl was stabilized

+/// in that release).

+#[cfg(not(feature = "use_arc_str"))]

+pub type ArcStr = Arc<String>;

+

+/// Optimized typedef for shared strings, replacing `Arc<String>`.

+///

+/// Enabled with the `use_arc_str` feature.

+#[cfg(feature = "use_arc_str")]

+pub type ArcStr = Arc<str>;

 

 macro_rules! try_opt (

     ($expr:expr) => (

@@ -73,6 +83,9 @@
 #[cfg(feature = "tiny_http")]

 pub mod tiny_http;

 

+#[cfg(feature = "nickel")]

+pub mod nickel;

+

 pub mod save;

 

 /// The server-side implementation of `multipart/form-data` requests.

@@ -110,7 +123,7 @@
     pub fn with_body<Bnd: Into<String>>(body: R, boundary: Bnd) -> Self {

         let boundary = boundary.into();

 

-        info!("Multipart::with_boundary(_, {:?}", boundary);

+        info!("Multipart::with_boundary(_, {:?})", boundary);

 

         Multipart { 

             reader: BoundaryReader::from_reader(body, boundary),

@@ -155,56 +168,6 @@
     pub fn save(&mut self) -> SaveBuilder<&mut Self> {

         SaveBuilder::new(self)

     }

-

-    /// Read the request fully, parsing all fields and saving all files in a new temporary

-    /// directory under the OS temporary directory. 

-    ///

-    /// If there is an error in reading the request, returns the partial result along with the

-    /// error. See [`SaveResult`](save/enum.SaveResult.html) for more information.

-    #[deprecated(since = "0.10.0", note = "use `.save().temp()` instead")]

-    pub fn save_all(&mut self) -> EntriesSaveResult<&mut Self> {

-        self.save().temp()

-    }

-

-    /// Read the request fully, parsing all fields and saving all files in a new temporary

-    /// directory under `dir`. 

-    ///

-    /// If there is an error in reading the request, returns the partial result along with the

-    /// error. See [`SaveResult`](save/enum.SaveResult.html) for more information.

-    #[deprecated(since = "0.10.0", note = "use `.save().with_temp_dir()` instead")]

-    pub fn save_all_under<P: AsRef<Path>>(&mut self, dir: P) -> EntriesSaveResult<&mut Self> {

-        match TempDir::new_in(dir, "multipart") {

-            Ok(temp_dir) => self.save().with_temp_dir(temp_dir),

-            Err(err) => SaveResult::Error(err),

-        }

-    }

-

-    /// Read the request fully, parsing all fields and saving all fields in a new temporary

-    /// directory under the OS temporary directory.

-    ///

-    /// Files larger than `limit` will be truncated to `limit`.

-    ///

-    /// If there is an error in reading the request, returns the partial result along with the

-    /// error. See [`SaveResult`](save/enum.SaveResult.html) for more information.

-    #[deprecated(since = "0.10.0", note = "use `.save().size_limit(limit)` instead")]

-    pub fn save_all_limited(&mut self, limit: u64) -> EntriesSaveResult<&mut Self> {

-        self.save().size_limit(limit).temp()

-    }

-

-    /// Read the request fully, parsing all fields and saving all files in a new temporary

-    /// directory under `dir`. 

-    ///

-    /// Files larger than `limit` will be truncated to `limit`.

-    ///

-    /// If there is an error in reading the request, returns the partial result along with the

-    /// error. See [`SaveResult`](save/enum.SaveResult.html) for more information.

-    #[deprecated(since = "0.10.0", note = "use `.save().size_limit(limit).with_temp_dir()` instead")]

-    pub fn save_all_under_limited<P: AsRef<Path>>(&mut self, dir: P, limit: u64) -> EntriesSaveResult<&mut Self> {

-        match TempDir::new_in(dir, "multipart") {

-            Ok(temp_dir) => self.save().size_limit(limit).with_temp_dir(temp_dir),

-            Err(err) => SaveResult::Error(err),

-        }

-    }

 }

 

 impl<R> Borrow<R> for Multipart<R> {

@@ -216,12 +179,16 @@
 impl<R: Read> PrivReadEntry for Multipart<R> {

     type Source = BoundaryReader<R>;

 

-    fn source(&mut self) -> &mut BoundaryReader<R> {

+    fn source_mut(&mut self) -> &mut BoundaryReader<R> {

         &mut self.reader

     }

 

+    fn set_min_buf_size(&mut self, min_buf_size: usize) {

+        self.reader.set_min_buf_size(min_buf_size)

+    }

+

     /// Consume the next boundary.

-    /// Returns `true` if the last boundary was read, `false` otherwise.

+    /// Returns `true` if a field should follow this boundary, `false` otherwise.

     fn consume_boundary(&mut self) -> io::Result<bool> {

         debug!("Consume boundary!");

         self.reader.consume_boundary()

@@ -245,3 +212,96 @@
     /// Return the request body for reading.

     fn body(self) -> Self::Body;

 }

+

+#[test]

+fn issue_104() {

+    ::init_log();

+

+    use std::io::Cursor;

+

+    let body = "\

+    POST /test.html HTTP/1.1\r\n\

+    Host: example.org\r\n\

+    Content-Type: multipart/form-data;boundary=\"boundary\"\r\n\r\n\

+    Content-Disposition: form-data; name=\"field1\"\r\n\r\n\

+    value1\r\n\

+    Content-Disposition: form-data; name=\"field2\"; filename=\"example.txt\"\r\n\r\n\

+    value2 ";

+

+    let request = Cursor::new(body);

+

+    let mut multipart = Multipart::with_body(request, "boundary");

+    multipart.foreach_entry(|_field| {/* Do nothing */}).unwrap_err();

+}

+

+#[test]

+fn issue_114() {

+    ::init_log();

+

+    fn consume_all<R: BufRead>(mut rdr: R) {

+        let mut consume = 0;

+

+        loop {

+            let consume = rdr.fill_buf().unwrap().len();

+            if consume == 0 { return; }

+            rdr.consume(consume);

+        }

+    }

+

+    use std::io::Cursor;

+

+    let body = "\

+    --------------------------c616e5fded96a3c7\r\n\

+    Content-Disposition: form-data; name=\"key1\"\r\n\r\n\

+    v1,\r\n\

+    --------------------------c616e5fded96a3c7\r\n\

+    Content-Disposition: form-data; name=\"key2\"\r\n\r\n\

+    v2,\r\n\

+    --------------------------c616e5fded96a3c7\r\n\

+    Content-Disposition: form-data; name=\"key3\"\r\n\r\n\

+    v3\r\n\

+    --------------------------c616e5fded96a3c7--\r\n";

+

+    let request = Cursor::new(body);

+    let mut multipart = Multipart::with_body(request, "------------------------c616e5fded96a3c7");

+

+    // one error if you do nothing

+    multipart.foreach_entry(|_entry| { /* do nothing */}).unwrap();

+

+    // a different error if you skip the first field

+    multipart.foreach_entry(|mut entry| if *entry.headers.name != "key1" { consume_all(entry.data); })

+        .unwrap();

+

+

+    multipart.foreach_entry(|mut entry| () /* match entry.headers.name.as_str() {

+        "file" => {

+            let mut vec = Vec::new();

+            entry.data.read_to_end(&mut vec).expect("can't read");

+            // message.file = String::from_utf8(vec).ok();

+            println!("key file got");

+        }

+

+        "key1" => {

+            let mut vec = Vec::new();

+            entry.data.read_to_end(&mut vec).expect("can't read");

+            // message.key1 = String::from_utf8(vec).ok();

+            println!("key1 got");

+        }

+

+        "key2" => {

+            let mut vec = Vec::new();

+            entry.data.read_to_end(&mut vec).expect("can't read");

+            // message.key2 = String::from_utf8(vec).ok();

+            println!("key2 got");

+        }

+

+        _ => {

+            // as multipart has a bug https://github.com/abonander/multipart/issues/114

+            // we manually do read_to_end here

+            //let mut _vec = Vec::new();

+            //entry.data.read_to_end(&mut _vec).expect("can't read");

+            println!("key neglected");

+        }

+    }*/)

+    .expect("Unable to iterate multipart?")

+}

diff --git a/rustc_deps/vendor/multipart/src/server/nickel.rs b/rustc_deps/vendor/multipart/src/server/nickel.rs
new file mode 100644
index 0000000..5d91eba
--- /dev/null
+++ b/rustc_deps/vendor/multipart/src/server/nickel.rs
@@ -0,0 +1,69 @@
+//! Support for `multipart/form-data` bodies in [Nickel](https://nickel.rs).

+pub extern crate nickel;

+

+use self::nickel::hyper;

+use self::hyper::header::ContentType;

+

+pub use self::nickel::Request as NickelRequest;

+pub use self::nickel::hyper::server::Request as HyperRequest;

+

+use server::{HttpRequest, Multipart};

+

+/// A wrapper for `&mut nickel::Request` which implements `multipart::server::HttpRequest`.

+///

+/// Necessary because this crate cannot directly provide an impl of `HttpRequest` for

+/// `&mut NickelRequest`.

+pub struct Maybe<'r, 'mw: 'r, 'server: 'mw, D: 'mw>(pub &'r mut NickelRequest<'mw, 'server, D>);

+

+impl<'r, 'mw: 'r, 'server: 'mw, D: 'mw> HttpRequest for Maybe<'r, 'mw, 'server, D> {

+    type Body = &'r mut HyperRequest<'mw, 'server>;

+

+    fn multipart_boundary(&self) -> Option<&str> {

+        // we can't use the impl from the `hyper` module because it might be the wrong version

+        let cont_type = try_opt!(self.0.origin.headers.get::<ContentType>());

+        cont_type.get_param("boundary").map(|v| v.as_str())

+    }

+

+    fn body(self) -> Self::Body {

+        &mut self.0.origin

+    }

+}

+

+/// Extension trait for getting the `multipart/form-data` body from `nickel::Request`.

+///

+/// Implemented for `nickel::Request`.

+pub trait MultipartBody<'mw, 'server> {

+    /// Get a multipart reader for the request body, if the request is of the right type.

+    fn multipart_body(&mut self) -> Option<Multipart<&mut HyperRequest<'mw, 'server>>>;

+}

+

+impl<'mw, 'server, D: 'mw> MultipartBody<'mw, 'server> for NickelRequest<'mw, 'server, D> {

+    fn multipart_body(&mut self) -> Option<Multipart<&mut HyperRequest<'mw, 'server>>> {

+        Multipart::from_request(Maybe(self)).ok()

+    }

+}

+

+impl<'r, 'mw: 'r, 'server: 'mw, D: 'mw> AsRef<&'r mut NickelRequest<'mw, 'server, D>> for Maybe<'r, 'mw, 'server, D> {

+    fn as_ref(&self) -> &&'r mut NickelRequest<'mw, 'server, D> {

+        &self.0

+    }

+}

+

+impl<'r, 'mw: 'r, 'server: 'mw, D: 'mw> AsMut<&'r mut NickelRequest<'mw, 'server, D>> for Maybe<'r, 'mw, 'server, D> {

+    fn as_mut(&mut self) -> &mut &'r mut NickelRequest<'mw, 'server, D> {

+        &mut self.0

+    }

+}

+

+impl<'r, 'mw: 'r, 'server: 'mw, D: 'mw> Into<&'r mut NickelRequest<'mw, 'server, D>> for Maybe<'r, 'mw, 'server, D> {

+    fn into(self) -> &'r mut NickelRequest<'mw, 'server, D> {

+        self.0

+    }

+}

+

+impl<'r, 'mw: 'r, 'server: 'mw, D: 'mw> From<&'r mut NickelRequest<'mw, 'server, D>> for Maybe<'r, 'mw, 'server, D> {

+    fn from(req: &'r mut NickelRequest<'mw, 'server, D>) -> Self {

+        Maybe(req)

+    }

+}

+

diff --git a/rustc_deps/vendor/multipart/src/server/save.rs b/rustc_deps/vendor/multipart/src/server/save.rs
index bfeeb9d..9594ee4 100644
--- a/rustc_deps/vendor/multipart/src/server/save.rs
+++ b/rustc_deps/vendor/multipart/src/server/save.rs
@@ -6,19 +6,22 @@
 // copied, modified, or distributed except according to those terms.

 //! Utilities for saving request entries to the filesystem.

 

-use mime::Mime;

-

-use super::field::{MultipartData, MultipartFile, ReadEntry, ReadEntryResult};

-

-use self::SaveResult::*;

+pub use server::buf_redux::BufReader;

 

 pub use tempdir::TempDir;

 

 use std::collections::HashMap;

 use std::io::prelude::*;

-use std::fs::OpenOptions;

+use std::fs::{self, File, OpenOptions};

 use std::path::{Path, PathBuf};

-use std::{env, fs, io, mem};

+use std::{cmp, env, io, mem, str, u32, u64};

+

+use server::field::{FieldHeaders, MultipartField, MultipartData, ReadEntry, ReadEntryResult};

+use server::ArcStr;

+

+use self::SaveResult::*;

+use self::TextPolicy::*;

+use self::PartialReason::*;

 

 const RANDOM_FILENAME_LEN: usize = 12;

 

@@ -30,11 +33,40 @@
     ($try:expr) => (

         match $try {

             Ok(val) => val,

-            Err(e) => return SaveResult::Error(e),

+            Err(e) => return Error(e),

         }

     )

 );

 

+macro_rules! try_full (

+    ($try:expr) => {

+        match $try {

+            Full(full) => full,

+            other => return other,

+        }

+    }

+);

+

+macro_rules! try_partial (

+    ($try:expr) => {

+        match $try {

+            Full(full) => return Full(full.into()),

+            Partial(partial, reason) => (partial, reason),

+            Error(e) => return Error(e),

+        }

+    }

+);

+

+#[derive(Clone, Copy, Debug, Eq, PartialEq)]

+enum TextPolicy {

+    /// Attempt to read a text field as text, falling back to binary on error

+    Try,

+    /// Attempt to read a text field as text, returning any errors

+    Force,

+    /// Don't try to read text

+    Ignore

+}

+

 /// A builder for saving a file or files to the local filesystem.

 ///

 /// ### `OpenOptions`

@@ -49,13 +81,42 @@
 /// `mod_open_opts()`.

 ///

 /// ### File Size and Count Limits

-/// You can set a size limit for individual files with `size_limit()`, which takes either `u64`

+/// You can set a size limit for individual fields with `size_limit()`, which takes either `u64`

 /// or `Option<u64>`.

 ///

-/// You can also set the maximum number of files to process with `count_limit()`, which

+/// You can also set the maximum number of fields to process with `count_limit()`, which

 /// takes either `u32` or `Option<u32>`. This only has an effect when using

 /// `SaveBuilder<[&mut] Multipart>`.

 ///

+/// By default, these limits are set conservatively to limit the maximum memory and disk space

+/// usage of a single request. You should set `count_limit` specifically for each request endpoint

+/// based on the number of fields you're expecting (exactly to that number if you're not expecting

+/// duplicate fields).

+///

+/// ### Memory Threshold and Text Policy

+/// By default, small fields (a few kilobytes or smaller) will be read directly to memory

+/// without creating a file. This behavior is controlled by the `memory_threshold()` setter. You can

+/// *roughly* tune the maximum memory a single request uses by tuning

+/// `count_limit * memory_threshold`

+///

+/// If a field appears to contain text data (its content-type is `text/*` or it doesn't declare

+/// one), `SaveBuilder` can read it to a string instead of saving the raw bytes as long as it falls

+/// below the set `memory_threshold`.

+///

+/// By default, the behavior is to attempt to validate the data as UTF-8, falling back to saving

+/// just the bytes if the validation fails at any point. You can restore/ensure this behavior

+/// with the `try_text()` modifier.

+///

+/// Alternatively, you can use the `force_text()` modifier to make the save operation return

+/// an error when UTF-8 decoding fails, though this only holds true while the size is below

+/// `memory_threshold`. The `ignore_text()` modifier turns off UTF-8 validation altogether.

+///

+/// UTF-8 validation is performed incrementally (after every `BufRead::fill_buf()` call)

+/// to hopefully maximize throughput, instead of blocking while the field is read to completion

+/// and performing validation over the entire result at the end. (RFC: this could be a lot of

+/// unnecessary work if most fields end up being written to the filesystem, however, but this

+/// can be turned off with `ignore_text()` if it fits the use-case.)

+///

 /// ### Warning: Do **not** trust user input!

 /// It is a serious security risk to create files or directories with paths based on user input.

 /// A malicious user could craft a path which can be used to overwrite important files, such as

@@ -71,10 +132,13 @@
 pub struct SaveBuilder<S> {

     savable: S,

     open_opts: OpenOptions,

-    size_limit: Option<u64>,

-    count_limit: Option<u32>,

+    size_limit: u64,

+    count_limit: u32,

+    memory_threshold: u64,

+    text_policy: TextPolicy,

 }

 

+/// Common methods for whole requests as well as individual fields.

 impl<S> SaveBuilder<S> {

     /// Implementation detail but not problematic to have accessible.

     #[doc(hidden)]

@@ -83,18 +147,24 @@
         open_opts.write(true).create_new(true);

 

         SaveBuilder {

-            savable: savable,

-            open_opts: open_opts,

-            size_limit: None,

-            count_limit: None,

+            savable,

+            open_opts,

+            // 8 MiB, on the conservative end compared to most frameworks

+            size_limit: 8 * 1024 * 1024,

+            // Arbitrary, I have no empirical data for this

+            count_limit: 256,

+            // 10KiB, used by Apache Commons

+            // https://commons.apache.org/proper/commons-fileupload/apidocs/org/apache/commons/fileupload/disk/DiskFileItemFactory.html

+            memory_threshold: 10 * 1024,

+            text_policy: TextPolicy::Try,

         }

     }

 

     /// Set the maximum number of bytes to write out *per file*.

     ///

-    /// Can be `u64` or `Option<u64>`. If `None`, clears the limit.

+    /// Can be `u64` or `Option<u64>`. If `None` or `u64::MAX`, clears the limit.

     pub fn size_limit<L: Into<Option<u64>>>(mut self, limit: L) -> Self {

-        self.size_limit = limit.into();

+        self.size_limit = limit.into().unwrap_or(u64::MAX);

         self

     }

 

@@ -107,34 +177,73 @@
         self.open_opts.write(true);

         self

     }

+

+    /// Set the threshold at which to switch from copying a field into memory to copying

+    /// it to disk.

+    ///

+    /// If `0`, forces fields to save directly to the filesystem.

+    /// If `u64::MAX`, effectively forces fields to always save to memory.

+    pub fn memory_threshold(self, memory_threshold: u64) -> Self {

+        Self { memory_threshold, ..self }

+    }

+

+    /// When encountering a field that is apparently text, try to read it to a string or fall

+    /// back to binary otherwise.

+    ///

+    /// If set for an individual field (`SaveBuilder<&mut MultipartData<_>>`), will

+    /// always attempt to decode text regardless of the field's `Content-Type`.

+    ///

+    /// Has no effect once `memory_threshold` has been reached.

+    pub fn try_text(self) -> Self {

+        Self { text_policy: TextPolicy::Try, ..self }

+    }

+

+    /// When encountering a field that is apparently text, read it to a string or return an error.

+    ///

+    /// If set for an individual field (`SaveBuilder<&mut MultipartData<_>>`), will

+    /// always attempt to decode text regardless of the field's `Content-Type`.

+    ///

+    /// (RFC: should this continue to validate UTF-8 when writing to the filesystem?)

+    pub fn force_text(self) -> Self {

+        Self { text_policy: TextPolicy::Force, ..self}

+    }

+

+    /// Don't try to read or validate any field data as UTF-8.

+    pub fn ignore_text(self) -> Self {

+        Self { text_policy: TextPolicy::Ignore, ..self }

+    }

 }

 

 /// Save API for whole multipart requests.

 impl<M> SaveBuilder<M> where M: ReadEntry {

-    /// Set the maximum number of files to write out.

+    /// Set the maximum number of fields to process.

     ///

-    /// Can be `u32` or `Option<u32>`. If `None`, clears the limit.

+    /// Can be `u32` or `Option<u32>`. If `None` or `u32::MAX`, clears the limit.

     pub fn count_limit<L: Into<Option<u32>>>(mut self, count_limit: L) -> Self {

-        self.count_limit = count_limit.into();

+        self.count_limit = count_limit.into().unwrap_or(u32::MAX);

         self

     }

 

-    /// Save the file fields in the request to a new temporary directory prefixed with

+    /// Save all fields in the request using a new temporary directory prefixed with

     /// `multipart-rs` in the OS temporary directory.

     ///

     /// For more options, create a `TempDir` yourself and pass it to `with_temp_dir()` instead.

     ///

+    /// See `with_entries()` for more info.

+    ///

     /// ### Note: Temporary

     /// See `SaveDir` for more info (the type of `Entries::save_dir`).

     pub fn temp(self) -> EntriesSaveResult<M> {

         self.temp_with_prefix("multipart-rs")

     }

 

-    /// Save the file fields in the request to a new temporary directory with the given string

+    /// Save all fields in the request using a new temporary directory with the given string

     /// as a prefix in the OS temporary directory.

     ///

     /// For more options, create a `TempDir` yourself and pass it to `with_temp_dir()` instead.

     ///

+    /// See `with_entries()` for more info.

+    ///

     /// ### Note: Temporary

     /// See `SaveDir` for more info (the type of `Entries::save_dir`).

     pub fn temp_with_prefix(self, prefix: &str) -> EntriesSaveResult<M> {

@@ -144,7 +253,9 @@
         }

     }

 

-    /// Save the file fields to the given `TempDir`.

+    /// Save all fields in the request using the given `TempDir`.

+    ///

+    /// See `with_entries()` for more info.

     ///

     /// The `TempDir` is returned in the result under `Entries::save_dir`.

     pub fn with_temp_dir(self, tempdir: TempDir) -> EntriesSaveResult<M> {

@@ -154,267 +265,432 @@
     /// Save the file fields in the request to a new permanent directory with the given path.

     ///

     /// Any nonexistent directories in the path will be created.

+    ///

+    /// See `with_entries()` for more info.

     pub fn with_dir<P: Into<PathBuf>>(self, dir: P) -> EntriesSaveResult<M> {

         let dir = dir.into();

 

         try_start!(create_dir_all(&dir));

 

-        self.with_entries(Entries::new(SaveDir::Perm(dir.into())))

+        self.with_entries(Entries::new(SaveDir::Perm(dir)))

     }

 

     /// Commence the save operation using the existing `Entries` instance.

     ///

     /// May be used to resume a saving operation after handling an error.

-    pub fn with_entries(mut self, mut entries: Entries) -> EntriesSaveResult<M> {

-        let mut count = 0;

+    ///

+    /// If `count_limit` is set, only reads that many fields before returning an error.

+    /// If you wish to resume from `PartialReason::CountLimit`, simply remove some entries.

+    ///

+    /// Note that `PartialReason::CountLimit` will still be returned if the number of fields

+    /// reaches `u32::MAX`, but this would be an extremely degenerate case.

+    pub fn with_entries(self, mut entries: Entries) -> EntriesSaveResult<M> {

+        let SaveBuilder {

+            savable, open_opts, count_limit, size_limit,

+            memory_threshold, text_policy

+        } = self;

 

-        loop {

-            let field = match ReadEntry::read_entry(self.savable) {

+        let mut res = ReadEntry::read_entry(savable);

+

+        let _ = entries.recount_fields();

+

+        let save_field = |field: &mut MultipartField<M>, entries: &Entries| {

+            let text_policy = if field.is_text() { text_policy } else { Ignore };

+

+            let mut saver = SaveBuilder {

+                savable: &mut field.data, open_opts: open_opts.clone(),

+                count_limit, size_limit, memory_threshold, text_policy

+            };

+

+            saver.with_dir(entries.save_dir.as_path())

+        };

+

+        while entries.fields_count < count_limit {

+            let mut field: MultipartField<M> = match res {

                 ReadEntryResult::Entry(field) => field,

-                ReadEntryResult::End(_) => break,

+                ReadEntryResult::End(_) => return Full(entries), // normal exit point

                 ReadEntryResult::Error(_, e) => return Partial (

                     PartialEntries {

-                        entries: entries,

-                        partial_file: None,

+                        entries,

+                        partial: None,

                     },

                     e.into(),

                 )

             };

 

-            match field.data {

-                MultipartData::File(mut file) => {

-                    match self.count_limit {

-                        Some(limit) if count >= limit => return Partial (

-                            PartialEntries {

-                                entries: entries,

-                                partial_file: Some(PartialFileField {

-                                    field_name: field.name,

-                                    source: file,

-                                    dest: None,

-                                })

-                            },

-                            PartialReason::CountLimit,

-                        ),

-                        _ => (),

-                    }

-

-                    count += 1;

-

-                    match file.save().size_limit(self.size_limit).with_dir(&entries.save_dir) {

-                        Full(saved_file) => {

-                            self.savable = file.take_inner();

-                            entries.mut_files_for(field.name).push(saved_file);

-                        },

-                        Partial(partial, reason) => return Partial(

-                            PartialEntries {

-                                entries: entries,

-                                partial_file: Some(PartialFileField {

-                                    field_name: field.name,

-                                    source: file,

-                                    dest: Some(partial)

-                                })

-                            },

-                            reason

-                        ),

-                        Error(e) => return Partial(

-                            PartialEntries {

-                                entries: entries,

-                                partial_file: Some(PartialFileField {

-                                    field_name: field.name,

-                                    source: file,

-                                    dest: None,

-                                }),

-                            },

-                            e.into(),

-                        ),

-                    }

+            let (dest, reason) = match save_field(&mut field, &entries) {

+                Full(saved) => {

+                    entries.push_field(field.headers, saved);

+                    res = ReadEntry::read_entry(field.data.into_inner());

+                    continue;

                 },

-                MultipartData::Text(mut text) => {

-                    self.savable = text.take_inner();

-                    entries.fields.insert(field.name, text.text);

+                Partial(saved, reason) => (Some(saved), reason),

+                Error(error) => (None, PartialReason::IoError(error)),

+            };

+

+            return Partial(

+                PartialEntries {

+                    entries,

+                    partial: Some(PartialSavedField {

+                        source: field,

+                        dest,

+                    }),

                 },

-            }

+                reason

+            );

         }

 

-        SaveResult::Full(entries)

+        Partial(

+            PartialEntries {

+                entries,

+                partial: None,

+            },

+            PartialReason::CountLimit

+        )

     }

 }

 

-/// Save API for individual files.

-impl<'m, M: 'm> SaveBuilder<&'m mut MultipartFile<M>> where MultipartFile<M>: BufRead {

-

-    /// Save to a file with a random alphanumeric name in the OS temporary directory.

-    ///

-    /// Does not use user input to create the path.

+/// Save API for individual fields.

+impl<'m, M: 'm> SaveBuilder<&'m mut MultipartData<M>> where MultipartData<M>: BufRead {

+    /// Save the field data, potentially using a file with a random name in the

+    /// OS temporary directory.

     ///

     /// See `with_path()` for more details.

-    pub fn temp(&mut self) -> FileSaveResult {

+    pub fn temp(&mut self) -> FieldSaveResult {

         let path = env::temp_dir().join(rand_filename());

         self.with_path(path)

     }

 

-    /// Save to a file with the given name in the OS temporary directory.

+    /// Save the field data, potentially using a file with the given name in

+    /// the OS temporary directory.

     ///

     /// See `with_path()` for more details.

-    ///

-    /// ### Warning: Do **not* trust user input!

-    /// It is a serious security risk to create files or directories with paths based on user input.

-    /// A malicious user could craft a path which can be used to overwrite important files, such as

-    /// web templates, static assets, Javascript files, database files, configuration files, etc.,

-    /// if they are writable by the server process.

-    ///

-    /// This can be mitigated somewhat by setting filesystem permissions as

-    /// conservatively as possible and running the server under its own user with restricted

-    /// permissions, but you should still not use user input directly as filesystem paths.

-    /// If it is truly necessary, you should sanitize filenames such that they cannot be

-    /// misinterpreted by the OS.

-    pub fn with_filename(&mut self, filename: &str) -> FileSaveResult {

+    pub fn with_filename(&mut self, filename: &str) -> FieldSaveResult {

         let mut tempdir = env::temp_dir();

         tempdir.set_file_name(filename);

 

         self.with_path(tempdir)

     }

 

-    /// Save to a file with a random alphanumeric name in the given directory.

+    /// Save the field data, potentially using a file with a random alphanumeric name

+    /// in the given directory.

     ///

     /// See `with_path()` for more details.

-    ///

-    /// ### Warning: Do **not* trust user input!

-    /// It is a serious security risk to create files or directories with paths based on user input.

-    /// A malicious user could craft a path which can be used to overwrite important files, such as

-    /// web templates, static assets, Javascript files, database files, configuration files, etc.,

-    /// if they are writable by the server process.

-    ///

-    /// This can be mitigated somewhat by setting filesystem permissions as

-    /// conservatively as possible and running the server under its own user with restricted

-    /// permissions, but you should still not use user input directly as filesystem paths.

-    /// If it is truly necessary, you should sanitize filenames such that they cannot be

-    /// misinterpreted by the OS.

-    pub fn with_dir<P: AsRef<Path>>(&mut self, dir: P) -> FileSaveResult {

+    pub fn with_dir<P: AsRef<Path>>(&mut self, dir: P) -> FieldSaveResult {

         let path = dir.as_ref().join(rand_filename());

         self.with_path(path)

     }

 

-    /// Save to a file with the given path.

+    /// Save the field data, potentially using a file with the given path.

     ///

-    /// Creates any missing directories in the path.

+    /// Creates any missing directories in the path (RFC: skip this step?).

     /// Uses the contained `OpenOptions` to create the file.

-    /// Truncates the file to the given limit, if set.

-    pub fn with_path<P: Into<PathBuf>>(&mut self, path: P) -> FileSaveResult {

+    /// Truncates the file to the given `size_limit`, if set.

+    ///

+    /// The no directories or files will be created until the set `memory_threshold` is reached.

+    /// If `size_limit` is set and less than or equal to `memory_threshold`,

+    /// then the disk will never be touched.

+    pub fn with_path<P: Into<PathBuf>>(&mut self, path: P) -> FieldSaveResult {

+        let bytes = if self.text_policy != Ignore {

+            let (text, reason) = try_partial!(self.save_text());

+            match reason {

+                SizeLimit if !self.cmp_size_limit(text.len()) => text.into_bytes(),

+                Utf8Error(_) if self.text_policy != Force => text.into_bytes(),

+                other => return Partial(text.into(), other),

+            }

+        } else {

+            Vec::new()

+        };

+

+        let (bytes, reason) = try_partial!(self.save_mem(bytes));

+

+        match reason {

+            SizeLimit if !self.cmp_size_limit(bytes.len()) => (),

+            other => return Partial(bytes.into(), other)

+        }

+

         let path = path.into();

 

-        let saved = SavedFile {

-            content_type: self.savable.content_type.clone(),

-            filename: self.savable.filename.clone(),

-            path: path,

-            size: 0,

-        };

-

-        let file = match create_dir_all(&saved.path).and_then(|_| self.open_opts.open(&saved.path)) {

+        let mut file = match create_dir_all(&path).and_then(|_| self.open_opts.open(&path)) {

             Ok(file) => file,

-            Err(e) => return Partial(saved, e.into())

+            Err(e) => return Error(e),

         };

 

-        self.write_to(file).map(move |written| saved.with_size(written))

+        let data = try_full!(

+            try_write_all(&bytes, &mut file)

+                .map(move |size| SavedData::File(path, size as u64))

+        );

+

+        self.write_to(file).map(move |written| data.add_size(written))

     }

 

 

-    /// Write out the file field to `dest`, truncating if a limit was set.

+    /// Write out the field data to `dest`, truncating if a limit was set.

     ///

     /// Returns the number of bytes copied, and whether or not the limit was reached

     /// (tested by `MultipartFile::fill_buf().is_empty()` so no bytes are consumed).

     ///

     /// Retries on interrupts.

     pub fn write_to<W: Write>(&mut self, mut dest: W) -> SaveResult<u64, u64> {

-        if let Some(limit) = self.size_limit {

-            let copied = match try_copy_buf(self.savable.take(limit), &mut dest) {

-                Full(copied) => copied,

-                other => return other,

-            };

-

-            // If there's more data to be read, the field was truncated

-            match self.savable.fill_buf() {

-                Ok(buf) if buf.is_empty() => Full(copied),

-                Ok(_) => Partial(copied, PartialReason::SizeLimit),

-                Err(e) => Partial(copied, PartialReason::IoError(e))

-            }

+        if self.size_limit < u64::MAX {

+            try_copy_limited(&mut self.savable, |buf| try_write_all(buf, &mut dest), self.size_limit)

         } else {

-            try_copy_buf(&mut self.savable, &mut dest)

+            try_read_buf(&mut self.savable, |buf| try_write_all(buf, &mut dest))

+        }

+    }

+

+    fn save_mem(&mut self, mut bytes: Vec<u8>) -> SaveResult<Vec<u8>, Vec<u8>> {

+        let pre_read = bytes.len() as u64;

+        match self.read_mem(|buf| { bytes.extend_from_slice(buf); Full(buf.len()) }, pre_read) {

+            Full(_) => Full(bytes),

+            Partial(_, reason) => Partial(bytes, reason),

+            Error(e) => if !bytes.is_empty() { Partial(bytes, e.into()) }

+            else { Error(e) }

+        }

+

+    }

+

+    fn save_text(&mut self) -> SaveResult<String, String> {

+        let mut string = String::new();

+

+        // incrementally validate UTF-8 to do as much work as possible during network activity

+        let res = self.read_mem(|buf| {

+            match str::from_utf8(buf) {

+                Ok(s) => { string.push_str(s); Full(buf.len()) },

+                // buffer should always be bigger

+                Err(e) => if buf.len() < 4 {

+                        Partial(0, e.into())

+                    } else {

+                        string.push_str(str::from_utf8(&buf[..e.valid_up_to()]).unwrap());

+                        Full(e.valid_up_to())

+                    }

+            }

+        }, 0);

+

+        match res {

+            Full(_) => Full(string),

+            Partial(_, reason) => Partial(string, reason),

+            Error(e) => Error(e),

+        }

+    }

+

+    fn read_mem<Wb: FnMut(&[u8]) -> SaveResult<usize, usize>>(&mut self, with_buf: Wb, pre_read: u64) -> SaveResult<u64, u64> {

+        let limit = cmp::min(self.size_limit, self.memory_threshold)

+            .saturating_sub(pre_read);

+        try_copy_limited(&mut self.savable, with_buf, limit)

+    }

+

+    fn cmp_size_limit(&self, size: usize) -> bool {

+        size as u64 >= self.size_limit

+    }

+}

+

+/// A field that has been saved (to memory or disk) from a multipart request.

+#[derive(Debug)]

+pub struct SavedField {

+    /// The headers of the field that was saved.

+    pub headers: FieldHeaders,

+    /// The data of the field which may reside in memory or on disk.

+    pub data: SavedData,

+}

+

+/// A saved field's data container (in memory or on disk)

+#[derive(Debug)]

+pub enum SavedData {

+    /// Validated UTF-8 text data.

+    Text(String),

+    /// Binary data.

+    Bytes(Vec<u8>),

+    /// A path to a file on the filesystem and its size as written by `multipart`.

+    File(PathBuf, u64),

+}

+

+impl SavedData {

+    /// Get an adapter for this data which implements `Read`.

+    ///

+    /// If the data is in a file, the file is opened in read-only mode.

+    pub fn readable(&self) -> io::Result<DataReader> {

+        use self::SavedData::*;

+

+        match *self {

+            Text(ref text) => Ok(DataReader::Bytes(text.as_ref())),

+            Bytes(ref bytes) => Ok(DataReader::Bytes(bytes)),

+            File(ref path, _) => Ok(DataReader::File(BufReader::new(fs::File::open(path)?))),

+        }

+    }

+

+    /// Get the size of the data, in memory or on disk.

+    ///

+    /// #### Note

+    /// The size on disk may not match the size of the file if it is externally modified.

+    pub fn size(&self) -> u64 {

+        use self::SavedData::*;

+

+        match *self {

+            Text(ref text) => text.len() as u64,

+            Bytes(ref bytes) => bytes.len() as u64,

+            File(_, size) => size,

+        }

+    }

+

+    /// Returns `true` if the data is known to be in memory (`Text | Bytes`)

+    pub fn is_memory(&self) -> bool {

+        use self::SavedData::*;

+

+        match *self {

+            Text(_) | Bytes(_) => true,

+            File(_, _) => false,

+        }

+    }

+

+    fn add_size(self, add: u64) -> Self {

+        use self::SavedData::File;

+

+        match self {

+            File(path, size) => File(path, size.saturating_add(add)),

+            other => other

         }

     }

 }

 

-/// A file saved to the local filesystem from a multipart request.

-#[derive(Debug)]

-pub struct SavedFile {

-    /// The complete path this file was saved at.

-    pub path: PathBuf,

-

-    /// ### Warning: Client Provided / Untrustworthy

-    /// You should treat this value as **untrustworthy** because it is an arbitrary string

-    /// provided by the client.

-    ///

-    /// It is a serious security risk to create files or directories with paths based on user input.

-    /// A malicious user could craft a path which can be used to overwrite important files, such as

-    /// web templates, static assets, Javascript files, database files, configuration files, etc.,

-    /// if they are writable by the server process.

-    ///

-    /// This can be mitigated somewhat by setting filesystem permissions as

-    /// conservatively as possible and running the server under its own user with restricted

-    /// permissions, but you should still not use user input directly as filesystem paths.

-    /// If it is truly necessary, you should sanitize filenames such that they cannot be

-    /// misinterpreted by the OS. Such functionality is outside the scope of this crate.

-    pub filename: Option<String>,

-

-    /// The MIME type (`Content-Type` value) of this file, if supplied by the client,

-    /// or `"applicaton/octet-stream"` otherwise.

-    ///

-    /// ### Note: Client Provided

-    /// Consider this value to be potentially untrustworthy, as it is provided by the client.

-    /// It may be inaccurate or entirely wrong, depending on how the client determined it.

-    ///

-    /// Some variants wrap arbitrary strings which could be abused by a malicious user if your

-    /// application performs any non-idempotent operations based on their value, such as

-    /// starting another program or querying/updating a database (web-search "SQL injection").

-    pub content_type: Mime,

-

-    /// The number of bytes written to the disk.

-    pub size: u64,

-}

-

-impl SavedFile {

-    fn with_size(self, size: u64) -> Self {

-        SavedFile { size: size, .. self }

+impl From<String> for SavedData {

+    fn from(s: String) -> Self {

+        SavedData::Text(s)

     }

 }

 

-/// A result of `Multipart::save_all()`.

+impl From<Vec<u8>> for SavedData {

+    fn from(b: Vec<u8>) -> Self {

+        SavedData::Bytes(b)

+    }

+}

+

+/// A `Read` (and `BufRead`) adapter for `SavedData`

+pub enum DataReader<'a> {

+    /// In-memory data source (`SavedData::Bytes | Text`)

+    Bytes(&'a [u8]),

+    /// On-disk data source (`SavedData::File`)

+    File(BufReader<File>),

+}

+

+impl<'a> Read for DataReader<'a> {

+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {

+        use self::DataReader::*;

+

+        match *self {

+            Bytes(ref mut bytes) => bytes.read(buf),

+            File(ref mut file) => file.read(buf),

+        }

+    }

+}

+

+impl<'a> BufRead for DataReader<'a> {

+    fn fill_buf(&mut self) -> io::Result<&[u8]> {

+        use self::DataReader::*;

+

+        match *self {

+            Bytes(ref mut bytes) => bytes.fill_buf(),

+            File(ref mut file) => file.fill_buf(),

+        }

+    }

+

+    fn consume(&mut self, amt: usize) {

+        use self::DataReader::*;

+

+        match *self {

+            Bytes(ref mut bytes) => bytes.consume(amt),

+            File(ref mut file) => file.consume(amt),

+        }

+    }

+}

+

+/// A result of `Multipart::save()`.

 #[derive(Debug)]

 pub struct Entries {

-    /// The text fields of the multipart request, mapped by field name -> value.

-    pub fields: HashMap<String, String>,

-    /// A map of file field names to their contents saved on the filesystem.

-    pub files: HashMap<String, Vec<SavedFile>>,

-    /// The directory the files in this request were saved under; may be temporary or permanent.

+    /// The fields of the multipart request, mapped by field name -> value.

+    ///

+    /// A field name may have multiple actual fields associated with it, but the most

+    /// common case is a single field.

+    ///

+    /// Each vector is guaranteed not to be empty unless externally modified.

+    // Even though individual fields might only have one entry, it's better to limit the

+    // size of a value type in `HashMap` to improve cache efficiency in lookups.

+    pub fields: HashMap<ArcStr, Vec<SavedField>>,

+    /// The directory that the entries in `fields` were saved into.

     pub save_dir: SaveDir,

+    fields_count: u32,

 }

 

 impl Entries {

-    fn new(save_dir: SaveDir) -> Self {

+    /// Create a new `Entries` with the given `SaveDir`

+    pub fn new(save_dir: SaveDir) -> Self {

         Entries {

             fields: HashMap::new(),

-            files: HashMap::new(),

-            save_dir: save_dir,

+            save_dir,

+            fields_count: 0,

         }

     }

 

-    /// Returns `true` if both `fields` and `files` are empty, `false` otherwise.

+    /// Returns `true` if `fields` is empty, `false` otherwise.

     pub fn is_empty(&self) -> bool {

-        self.fields.is_empty() && self.files.is_empty()

+        self.fields.is_empty()

     }

 

-    fn mut_files_for(&mut self, field: String) -> &mut Vec<SavedFile> {

-        self.files.entry(field).or_insert_with(Vec::new)

+    /// The number of actual fields contained within this `Entries`.

+    ///

+    /// Effectively `self.fields.values().map(Vec::len).sum()` but maintained separately.

+    ///

+    /// ## Note

+    /// This will be incorrect if `fields` is modified externally. Call `recount_fields()`

+    /// to get the correct count.

+    pub fn fields_count(&self) -> u32 {

+        self.fields_count

+    }

+

+    /// Sum the number of fields in this `Entries` and then return the updated value.

+    pub fn recount_fields(&mut self) -> u32 {

+        let fields_count = self.fields.values().map(Vec::len).sum();

+        // saturating cast

+        self.fields_count = cmp::min(u32::MAX as usize, fields_count) as u32;

+        self.fields_count

+    }

+

+    fn push_field(&mut self, mut headers: FieldHeaders, data: SavedData) {

+        use std::collections::hash_map::Entry::*;

+

+        match self.fields.entry(headers.name.clone()) {

+            Vacant(vacant) => { vacant.insert(vec![SavedField { headers, data }]); },

+            Occupied(occupied) => {

+                // dedup the field name by reusing the key's `Arc`

+                headers.name = occupied.key().clone();

+                occupied.into_mut().push({ SavedField { headers, data }});

+            },

+        }

+

+        self.fields_count = self.fields_count.saturating_add(1);

+    }

+

+    /// Print all fields and their contents to stdout. Mostly for testing purposes.

+    pub fn print_debug(&self) -> io::Result<()> {

+        let stdout = io::stdout();

+        let stdout_lock = stdout.lock();

+        self.write_debug(stdout_lock)

+    }

+

+    /// Write all fields and their contents to the given output. Mostly for testing purposes.

+    pub fn write_debug<W: Write>(&self, mut writer: W) -> io::Result<()> {

+        for (name, entries) in &self.fields {

+            writeln!(writer, "Field {:?} has {} entries:", name, entries.len())?;

+

+            for (idx, field) in entries.iter().enumerate() {

+                let mut data = field.data.readable()?;

+                let headers = &field.headers;

+                writeln!(writer, "{}: {:?} ({:?}):", idx, headers.filename, headers.content_type)?;

+                io::copy(&mut data, &mut writer)?;

+            }

+        }

+

+        Ok(())

     }

 }

 

@@ -427,7 +703,7 @@
     /// This directory is permanent and will be left on the filesystem when this wrapper is dropped.

     ///

     /// **N.B.** If this directory is in the OS temporary directory then it may still be

-    /// deleted at any time, usually on reboot or when free space is low.

+    /// deleted at any time.

     Perm(PathBuf),

 }

 

@@ -515,11 +791,19 @@
     SizeLimit,

     /// An error occurred during the operation.

     IoError(io::Error),

+    /// An error returned from validating a field as UTF-8 due to `SaveBuilder::force_text()`

+    Utf8Error(str::Utf8Error),

 }

 

 impl From<io::Error> for PartialReason {

     fn from(e: io::Error) -> Self {

-        PartialReason::IoError(e)

+        IoError(e)

+    }

+}

+

+impl From<str::Utf8Error> for PartialReason {

+    fn from(e: str::Utf8Error) -> Self {

+        Utf8Error(e)

     }

 }

 

@@ -539,18 +823,17 @@
     }

 }

 

-/// The file field that was being read when the save operation quit.

+/// The field that was being read when the save operation quit.

 ///

 /// May be partially saved to the filesystem if `dest` is `Some`.

 #[derive(Debug)]

-pub struct PartialFileField<M> {

-    /// The field name for the partial file.

-    pub field_name: String,

-    /// The partial file's source in the multipart stream (may be partially read if `dest`

-    /// is `Some`).

-    pub source: MultipartFile<M>,

-    /// The partial file's entry on the filesystem, if the operation got that far.

-    pub dest: Option<SavedFile>,

+pub struct PartialSavedField<M: ReadEntry> {

+    /// The field that was being read.

+    ///

+    /// May be partially read if `dest` is `Some`.

+    pub source: MultipartField<M>,

+    /// The data from the saving operation, if it got that far.

+    pub dest: Option<SavedData>,

 }

 

 /// The partial result type for `Multipart::save*()`.

@@ -559,30 +842,30 @@
 /// saved file that was in the process of being read when the error occurred,

 /// if applicable.

 #[derive(Debug)]

-pub struct PartialEntries<M> {

+pub struct PartialEntries<M: ReadEntry> {

     /// The entries that were saved successfully.

     pub entries: Entries,

-    /// The file that was in the process of being read. `None` if the error

-    /// occurred between file entries.

-    pub partial_file: Option<PartialFileField<M>>,

+    /// The field that was in the process of being read. `None` if the error

+    /// occurred between entries.

+    pub partial: Option<PartialSavedField<M>>,

 }

 

-/// Discards `partial_file`

-impl<M> Into<Entries> for PartialEntries<M> {

+/// Discards `partial`

+impl<M: ReadEntry> Into<Entries> for PartialEntries<M> {

     fn into(self) -> Entries {

         self.entries

     }

 }

 

-impl<M> PartialEntries<M> {

-    /// If `partial_file` is present and contains a `SavedFile` then just

+impl<M: ReadEntry> PartialEntries<M> {

+    /// If `partial` is present and contains a `SavedFile` then just

     /// add it to the `Entries` instance and return it.

     ///

     /// Otherwise, returns `self.entries`

     pub fn keep_partial(mut self) -> Entries {

-        if let Some(partial_file) = self.partial_file {

-            if let Some(saved_file) = partial_file.dest {

-                self.entries.mut_files_for(partial_file.field_name).push(saved_file);

+        if let Some(partial) = self.partial {

+            if let Some(saved) = partial.dest {

+                self.entries.push_field(partial.source.headers, saved);

             }

         }

 

@@ -605,14 +888,14 @@
 /// Shorthand result for methods that return `Entries`

 pub type EntriesSaveResult<M> = SaveResult<Entries, PartialEntries<M>>;

 

-/// Shorthand result for methods that return `SavedFile`s.

+/// Shorthand result for methods that return `FieldData`s.

 ///

-/// The `MultipartFile` is not provided here because it is not necessary to return

+/// The `MultipartData` is not provided here because it is not necessary to return

 /// a borrow when the owned version is probably in the same scope. This hopefully

 /// saves some headache with the borrow-checker.

-pub type FileSaveResult = SaveResult<SavedFile, SavedFile>;

+pub type FieldSaveResult = SaveResult<SavedData, SavedData>;

 

-impl<M> EntriesSaveResult<M> {

+impl<M: ReadEntry> EntriesSaveResult<M> {

     /// Take the `Entries` from `self`, if applicable, and discarding

     /// the error, if any.

     pub fn into_entries(self) -> Option<Entries> {

@@ -643,7 +926,7 @@
     pub fn into_opt_both(self) -> (Option<S>, Option<io::Error>) {

         match self {

             Full(full)  => (Some(full), None),

-            Partial(partial, PartialReason::IoError(e)) => (Some(partial.into()), Some(e)),

+            Partial(partial, IoError(e)) => (Some(partial.into()), Some(e)),

             Partial(partial, _) => (Some(partial.into()), None),

             Error(error) => (None, Some(error)),

         }

@@ -685,7 +968,18 @@
     }

 }

 

-fn try_copy_buf<R: BufRead, W: Write>(mut src: R, mut dest: W) -> SaveResult<u64, u64> {

+fn try_copy_limited<R: BufRead, Wb: FnMut(&[u8]) -> SaveResult<usize, usize>>(src: R, mut with_buf: Wb, limit: u64) -> SaveResult<u64, u64> {

+    let mut copied = 0u64;

+    try_read_buf(src, |buf| {

+        let new_copied = copied.saturating_add(buf.len() as u64);

+        if new_copied > limit { return Partial(0, PartialReason::SizeLimit) }

+        copied = new_copied;

+

+        with_buf(buf)

+    })

+}

+

+fn try_read_buf<R: BufRead, Wb: FnMut(&[u8]) -> SaveResult<usize, usize>>(mut src: R, mut with_buf: Wb) -> SaveResult<u64, u64> {

     let mut total_copied = 0u64;

 

     macro_rules! try_here (

@@ -703,7 +997,7 @@
         let res = {

             let buf = try_here!(src.fill_buf());

             if buf.is_empty() { break; }

-            try_write_all(buf, &mut dest)

+            with_buf(buf)

         };

 

         match res {

@@ -721,7 +1015,7 @@
     Full(total_copied)

 }

 

-fn try_write_all<W>(mut buf: &[u8], mut dest: W) -> SaveResult<usize, usize> where W: Write {

+fn try_write_all<W: Write>(mut buf: &[u8], mut dest: W) -> SaveResult<usize, usize> {

     let mut total_copied = 0;

 

     macro_rules! try_here (

diff --git a/rustc_deps/vendor/multipart/src/server/tiny_http.rs b/rustc_deps/vendor/multipart/src/server/tiny_http.rs
index 20b4015..cce9a08 100644
--- a/rustc_deps/vendor/multipart/src/server/tiny_http.rs
+++ b/rustc_deps/vendor/multipart/src/server/tiny_http.rs
@@ -14,7 +14,7 @@
     type Body = &'r mut Read;

     

     fn multipart_boundary(&self) -> Option<&str> {

-        const BOUNDARY: &'static str = "boundary=";

+        const BOUNDARY: &str = "boundary=";

 

         let content_type = try_opt!(self.headers().iter().find(|header| header.field.equiv("Content-Type"))).value.as_str();

         let start = try_opt!(content_type.find(BOUNDARY)) + BOUNDARY.len();

diff --git a/rustc_deps/vendor/rouille/.cargo-checksum.json b/rustc_deps/vendor/rouille/.cargo-checksum.json
index 8da722b..cdcb079 100644
--- a/rustc_deps/vendor/rouille/.cargo-checksum.json
+++ b/rustc_deps/vendor/rouille/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"070b2069e7dc80963bdc83028750da89da60e6d9a4f959deef1161f3b7a4c0bf","Cargo.toml":"99addf0980c31785bd3e429adef43e0f6e130a02307eec3cf995aefead0b9f7f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"44e02b12ec9bb370439dfea9463efa373466c640cef41fe6a15167b74c3e790e","README.md":"bc57e7a4170a2a465d7b3a26d1a22142a9c0d65f89e9c98a5af0020ffc2ab79c","examples/database.rs":"7bb74e9ac9a91e168b3e84566f8680f2bc335e8bf46f13f883868d44322ac111","examples/git-http-backend.rs":"87465c10a75712899bd161602ae4ea91b38eb64342ff261e82a798051e481314","examples/hello-world.rs":"b29aad61387a286a2d5bcaf0aa072c94e21302fbab01b14c51fcd807e23fd4fd","examples/login-session.rs":"3cf89d7bbf020a4ba784c2b833573e2e08fb2cc0204349fba67a10847411f703","examples/php-test.php":"3d16114c5e1f1ecec1ec9c0ded0abb3e17389f7e472b19d0ba31f3bd53eefa33","examples/php.rs":"c6f5a380809bb38697f73bcf184c755a23e1fe1df4a01c22b3d9d7f1c8798590","examples/reverse-proxy.rs":"bd7a6371ab0b1f576c9f6135eefaf2e0339c0c9d9683fe5d6c5900224f9cc9ef","examples/simple-form.rs":"a2b0d076a6c163c82e0f9cffb21c25c53d1d7ff58adae5a1e58500f456c81ee3","examples/static-files.rs":"b2301f55716d1824655b6d4210f9e218b2d6f7cf272af8a4bc9931d6f8bf5ffe","examples/websocket.rs":"7f5836eaf8c2c3bb3093c05df640456887b51dc96b83590fb85a03e494899796","src/assets.rs":"9742e9c62d65d27809cc860417c3e04c4806a3fafb6d90b4ab617c3848fafac0","src/cgi.rs":"6965c6cf2c6587eabc622dcb47da73331dc48947183fadc221be6aabbabfa9d6","src/content_encoding.rs":"70e09a652868e9784faafd274c8dababaecdb156ec7b5999f1dd6b09619a2ecd","src/find_route.rs":"0c4db702e2e104950b7510d7b03e867248574e471c384965d2e3f2b9a643901d","src/input/accept.rs":"affb276c4fae6a29fe272982acc07fe188919e901081712f1b44df5e44acfd78","src/input/basic_http_auth.rs":"8e0ebe8d11d2258eca5bab092ae476943e79b34a1b8d7e78c7f7beea17159876","src/input/cookies.rs":"6e76659ee8107b0285ca0e20ba2523a939252059228ee6196958fcb24e318524","src/input/json.rs":"9da51db0e1e6697cd
a5683b822b8657c654dc691e06985b05bac16db2aa27ac4","src/input/mod.rs":"b9c1cdb7cd9aeb599c67aa2198e323eaffe7565aad3867321d523139b8d11f1d","src/input/multipart.rs":"e41cc3722c9c1e13aaa18aa803e3b70d2b0bc0a6328f33feba31b0c652a07824","src/input/plain.rs":"44aa9fd68bc3385c247b40681edf17a842157eed8e4fc51c9957dcac56929db9","src/input/post.rs":"5dc317decfc43cdec0b71487b18d4274f891d151aa0ecd6162e2b184732da9b0","src/input/priority_header.rs":"6b4bb74dc97f05b4f4014378484a71cc0ef881bf56e52e1e2a5c042579219504","src/lib.rs":"2fa55ee1bfe4b14c84b0b3865e82b7ed81c8d39e5361c49d9e639491d28d9e54","src/log.rs":"f49f0cee0a33c1491c1299593f920defab18f9238b7eab28f9e628d88b2ea8ba","src/proxy.rs":"04d3167f796b17099dd8b4e1221a93d77ce8c73c9d898c699bd0ed3414e47438","src/response.rs":"240040ddf9539c882ffed13265e77bbe49ec34ac8a8f6e5138158dd9dfc080ca","src/router.rs":"69fe31163092a4be32be76d31677fbbdca40a98cbab68a397ef77e81a67b07b2","src/session.rs":"35c268dc4b8510bc41148729db9e62f830c1b6d7e9f7c641137e9c944e52b5db","src/try_or_400.rs":"71fda6e3d70222c5c00b96b42d7136bbee417e4474ecde212bb5949eae8816ef","src/websocket/low_level.rs":"2fb137c26fda7af0518f51422b0e50d2114ac49527f707bd50732fc906dde119","src/websocket/mod.rs":"012a913407c5f61b8ccdf4003bd00c3358a859209ea6f79b8b62ae9b3e439c66","src/websocket/websocket.rs":"1e8cf0b0338e58453056220ab53d10259d72d52b3c8f57cc14de7e24960f9f52"},"package":"0845b9c39ba772da769fe2aaa4d81bfd10695a7ea051d0510702260ff4159841"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"0083890a604a1a8097cfa1e3ca9dda63bdfd32d793a4570132288adce6ca3d21","Cargo.toml":"603ffba0da62dbc236e83a112647b8c4cd0de9cf03af07b9ba647d19e2e43ac3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"44e02b12ec9bb370439dfea9463efa373466c640cef41fe6a15167b74c3e790e","README.md":"bc57e7a4170a2a465d7b3a26d1a22142a9c0d65f89e9c98a5af0020ffc2ab79c","examples/database.rs":"7bb74e9ac9a91e168b3e84566f8680f2bc335e8bf46f13f883868d44322ac111","examples/git-http-backend.rs":"87465c10a75712899bd161602ae4ea91b38eb64342ff261e82a798051e481314","examples/hello-world.rs":"b29aad61387a286a2d5bcaf0aa072c94e21302fbab01b14c51fcd807e23fd4fd","examples/login-session.rs":"3cf89d7bbf020a4ba784c2b833573e2e08fb2cc0204349fba67a10847411f703","examples/php-test.php":"3d16114c5e1f1ecec1ec9c0ded0abb3e17389f7e472b19d0ba31f3bd53eefa33","examples/php.rs":"c6f5a380809bb38697f73bcf184c755a23e1fe1df4a01c22b3d9d7f1c8798590","examples/reverse-proxy.rs":"bd7a6371ab0b1f576c9f6135eefaf2e0339c0c9d9683fe5d6c5900224f9cc9ef","examples/simple-form.rs":"a2b0d076a6c163c82e0f9cffb21c25c53d1d7ff58adae5a1e58500f456c81ee3","examples/static-files.rs":"b2301f55716d1824655b6d4210f9e218b2d6f7cf272af8a4bc9931d6f8bf5ffe","examples/websocket.rs":"7f5836eaf8c2c3bb3093c05df640456887b51dc96b83590fb85a03e494899796","src/assets.rs":"1dc4605a85b475af93956e05dbc45b915437b91e149ae563fc82a36d9b866db9","src/cgi.rs":"6965c6cf2c6587eabc622dcb47da73331dc48947183fadc221be6aabbabfa9d6","src/content_encoding.rs":"70e09a652868e9784faafd274c8dababaecdb156ec7b5999f1dd6b09619a2ecd","src/find_route.rs":"0c4db702e2e104950b7510d7b03e867248574e471c384965d2e3f2b9a643901d","src/input/accept.rs":"affb276c4fae6a29fe272982acc07fe188919e901081712f1b44df5e44acfd78","src/input/basic_http_auth.rs":"8e0ebe8d11d2258eca5bab092ae476943e79b34a1b8d7e78c7f7beea17159876","src/input/cookies.rs":"6e76659ee8107b0285ca0e20ba2523a939252059228ee6196958fcb24e318524","src/input/json.rs":"9da51db0e1e6697cd
a5683b822b8657c654dc691e06985b05bac16db2aa27ac4","src/input/mod.rs":"b9c1cdb7cd9aeb599c67aa2198e323eaffe7565aad3867321d523139b8d11f1d","src/input/multipart.rs":"fadf47ddbb439a3c4555ba4b0980253e552daa23e0398bf612dc173c7593cad6","src/input/plain.rs":"44aa9fd68bc3385c247b40681edf17a842157eed8e4fc51c9957dcac56929db9","src/input/post.rs":"9ab6265b9a85734866ec4cdab43e9993eb831697b863c08b9c5c2676d724336e","src/input/priority_header.rs":"6b4bb74dc97f05b4f4014378484a71cc0ef881bf56e52e1e2a5c042579219504","src/lib.rs":"2fa55ee1bfe4b14c84b0b3865e82b7ed81c8d39e5361c49d9e639491d28d9e54","src/log.rs":"f49f0cee0a33c1491c1299593f920defab18f9238b7eab28f9e628d88b2ea8ba","src/proxy.rs":"04d3167f796b17099dd8b4e1221a93d77ce8c73c9d898c699bd0ed3414e47438","src/response.rs":"240040ddf9539c882ffed13265e77bbe49ec34ac8a8f6e5138158dd9dfc080ca","src/router.rs":"69fe31163092a4be32be76d31677fbbdca40a98cbab68a397ef77e81a67b07b2","src/session.rs":"36d51735997c39eaccf96237a1f602b23a8487cd62bae53e67585f689d1f7a79","src/try_or_400.rs":"71fda6e3d70222c5c00b96b42d7136bbee417e4474ecde212bb5949eae8816ef","src/websocket/low_level.rs":"2fb137c26fda7af0518f51422b0e50d2114ac49527f707bd50732fc906dde119","src/websocket/mod.rs":"012a913407c5f61b8ccdf4003bd00c3358a859209ea6f79b8b62ae9b3e439c66","src/websocket/websocket.rs":"1e8cf0b0338e58453056220ab53d10259d72d52b3c8f57cc14de7e24960f9f52"},"package":"112568052ec17fa26c6c11c40acbb30d3ad244bf3d6da0be181f5e7e42e5004f"}
\ No newline at end of file
diff --git a/rustc_deps/vendor/rouille/CHANGELOG.md b/rustc_deps/vendor/rouille/CHANGELOG.md
index 057d046..9d4f9d5 100644
--- a/rustc_deps/vendor/rouille/CHANGELOG.md
+++ b/rustc_deps/vendor/rouille/CHANGELOG.md
@@ -2,6 +2,10 @@
 

 ## Version [Unreleased]

 

+## Version 3.0.0

+

+- Bumped the `multipart` crate and updated rouille's mutipart API to match it.

+

 ## Version 2.2.0

 

 - Bump minimum supported Rust version to 1.20.0.

diff --git a/rustc_deps/vendor/rouille/Cargo.toml b/rustc_deps/vendor/rouille/Cargo.toml
index d147f35..7a52e27 100644
--- a/rustc_deps/vendor/rouille/Cargo.toml
+++ b/rustc_deps/vendor/rouille/Cargo.toml
@@ -12,7 +12,7 @@
 
 [package]
 name = "rouille"
-version = "2.2.0"
+version = "3.0.0"
 authors = ["Pierre Krieger <pierre.krieger1708@gmail.com>"]
 description = "High-level idiomatic web framework."
 documentation = "http://docs.rs/rouille"
@@ -40,7 +40,7 @@
 version = "0.2.0"
 
 [dependencies.multipart]
-version = "0.13.6"
+version = "0.15"
 features = ["server"]
 default-features = false
 
@@ -48,7 +48,7 @@
 version = "1"
 
 [dependencies.rand]
-version = "0.4.2"
+version = "0.5"
 
 [dependencies.serde]
 version = "1"
diff --git a/rustc_deps/vendor/rouille/src/assets.rs b/rustc_deps/vendor/rouille/src/assets.rs
index 7a7a505..f2d538d 100644
--- a/rustc_deps/vendor/rouille/src/assets.rs
+++ b/rustc_deps/vendor/rouille/src/assets.rs
@@ -628,6 +628,7 @@
         Some("vsw") => "application/vnd.visio",
         Some("vsx") => "application/vnd.visio",
         Some("vtx") => "application/vnd.visio",
+        Some("wasm") => "application/wasm",
         Some("wav") => "audio/wav",
         Some("wave") => "audio/wav",
         Some("wax") => "audio/x-ms-wax",
diff --git a/rustc_deps/vendor/rouille/src/input/multipart.rs b/rustc_deps/vendor/rouille/src/input/multipart.rs
index d5eaa46..e6b2924 100644
--- a/rustc_deps/vendor/rouille/src/input/multipart.rs
+++ b/rustc_deps/vendor/rouille/src/input/multipart.rs
@@ -23,7 +23,6 @@
 // TODO: provide wrappers around these
 pub use multipart::server::MultipartField;
 pub use multipart::server::MultipartData;
-pub use multipart::server::MultipartFile;
 
 /// Error that can happen when decoding multipart data.
 #[derive(Clone, Debug)]
diff --git a/rustc_deps/vendor/rouille/src/input/post.rs b/rustc_deps/vendor/rouille/src/input/post.rs
index a1a7853..7573cf9 100644
--- a/rustc_deps/vendor/rouille/src/input/post.rs
+++ b/rustc_deps/vendor/rouille/src/input/post.rs
@@ -573,52 +573,52 @@
                     },
                 };
 
-                while let Some(multipart_entry) = multipart.next() {
+                while let Some(mut multipart_entry) = multipart.next() {
                     $(
-                        if multipart_entry.name == stringify!($field) {
+                        if multipart_entry.headers.name.as_ref() == stringify!($field) {
                             let config = ();
                             $(
                                 let config = $config;
                             )* 
 
-                            match multipart_entry.data {
-                                multipart::MultipartData::Text(txt) => {
-                                    let decoded = match DecodePostField::from_field(config, &txt.text) {
-                                        Ok(d) => d,
-                                        Err(err) => return Err(PostError::Field {
-                                            field: stringify!($field).into(),
-                                            error: err,
-                                        }),
-                                    };
-                                    match merge(&mut $field, decoded) {
-                                        Ok(d) => d,
-                                        Err(err) => return Err(PostError::Field {
-                                            field: stringify!($field).into(),
-                                            error: err,
-                                        }),
-                                    };
-                                },
-                                multipart::MultipartData::File(f) => {
-                                    let name = f.filename.as_ref().map(|n| n.to_owned());
-                                    let name = name.as_ref().map(|n| &n[..]);
-                                    let mime = f.content_type.to_string();
-                                    let decoded = match DecodePostField::from_file(config, f, name, &mime) {
-                                        Ok(d) => d,
-                                        Err(err) => return Err(PostError::Field {
-                                            field: stringify!($field).into(),
-                                            error: err,
-                                        }),
-                                    };
-                                    match merge(&mut $field, decoded) {
-                                        Ok(d) => d,
-                                        Err(err) => return Err(PostError::Field {
-                                            field: stringify!($field).into(),
-                                            error: err,
-                                        }),
-                                    };
-                                },
+                            if multipart_entry.is_text() {
+                                let mut text = String::new();
+                                multipart_entry.data.read_to_string(&mut text)?;
+                                let decoded = match DecodePostField::from_field(config, &text) {
+                                    Ok(d) => d,
+                                    Err(err) => return Err(PostError::Field {
+                                        field: stringify!($field).into(),
+                                        error: err,
+                                    }),
+                                };
+                                match merge(&mut $field, decoded) {
+                                    Ok(d) => d,
+                                    Err(err) => return Err(PostError::Field {
+                                        field: stringify!($field).into(),
+                                        error: err,
+                                    }),
+                                };
+                            } else {
+                                let name = multipart_entry.headers.filename.as_ref().map(|n| n.to_owned());
+                                let name = name.as_ref().map(|n| &n[..]);
+                                let mime = multipart_entry.headers.content_type
+                                    .map(|m| m.to_string())
+                                    .unwrap_or_else(String::new);
+                                let decoded = match DecodePostField::from_file(config, multipart_entry.data, name, &mime) {
+                                    Ok(d) => d,
+                                    Err(err) => return Err(PostError::Field {
+                                        field: stringify!($field).into(),
+                                        error: err,
+                                    }),
+                                };
+                                match merge(&mut $field, decoded) {
+                                    Ok(d) => d,
+                                    Err(err) => return Err(PostError::Field {
+                                        field: stringify!($field).into(),
+                                        error: err,
+                                    }),
+                                };
                             }
-
                             continue;
                         }
                     )*
diff --git a/rustc_deps/vendor/rouille/src/session.rs b/rustc_deps/vendor/rouille/src/session.rs
index 7dd5701..52842a2 100644
--- a/rustc_deps/vendor/rouille/src/session.rs
+++ b/rustc_deps/vendor/rouille/src/session.rs
@@ -38,6 +38,7 @@
 use std::sync::atomic::Ordering;

 use rand;

 use rand::Rng;

+use rand::distributions::Alphanumeric;

 

 use Request;

 use Response;

@@ -115,7 +116,7 @@
 pub fn generate_session_id() -> String {

     // 5e+114 possibilities is reasonable.

     rand::OsRng::new().expect("Failed to initialize OsRng")     // TODO: <- handle that?

-                      .gen_ascii_chars()

+                      .sample_iter(&Alphanumeric)

                       .filter(|&c| (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||

                                    (c >= '0' && c <= '9'))

                       .take(64).collect::<String>()